2024-11-23 06:34:55,017 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-23 06:34:55,038 main DEBUG Took 0.017550 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-23 06:34:55,039 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-23 06:34:55,039 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-23 06:34:55,041 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-23 06:34:55,043 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,051 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-23 06:34:55,064 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,066 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,067 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,068 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,069 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,069 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,070 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,070 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,071 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,071 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,072 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,073 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,074 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,074 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,075 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,075 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,076 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,076 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,077 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,077 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,079 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,079 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,080 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,080 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 06:34:55,081 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,081 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-23 06:34:55,083 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 06:34:55,085 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-23 06:34:55,087 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-23 06:34:55,087 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-23 06:34:55,089 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-23 06:34:55,089 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-23 06:34:55,102 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-23 06:34:55,107 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-23 06:34:55,109 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-23 06:34:55,110 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-23 06:34:55,110 main DEBUG createAppenders(={Console})
2024-11-23 06:34:55,111 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-11-23 06:34:55,112 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-23 06:34:55,112 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-11-23 06:34:55,113 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-23 06:34:55,113 main DEBUG OutputStream closed
2024-11-23 06:34:55,114 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-23 06:34:55,114 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-23 06:34:55,114 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-11-23 06:34:55,227 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-23 06:34:55,231 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-23 06:34:55,234 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-23 06:34:55,237 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-23 06:34:55,240 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-23 06:34:55,241 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-23 06:34:55,242 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-23 06:34:55,248 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-23 06:34:55,249 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-23 06:34:55,250 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-23 06:34:55,250 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-23 06:34:55,251 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-23 06:34:55,252 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-23 06:34:55,252 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-23 06:34:55,253 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-23 06:34:55,253 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-23 06:34:55,254 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-23 06:34:55,255 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-23 06:34:55,258 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-23 06:34:55,258 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-11-23 06:34:55,259 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-23 06:34:55,260 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-11-23T06:34:55,590 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9
2024-11-23 06:34:55,595 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-23 06:34:55,596 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-23T06:34:55,607 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-11-23T06:34:55,667 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=248, ProcessCount=11, AvailableMemoryMB=8412
2024-11-23T06:34:55,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-23T06:34:55,701 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401, deleteOnExit=true
2024-11-23T06:34:55,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-23T06:34:55,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/test.cache.data in system properties and HBase conf
2024-11-23T06:34:55,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/hadoop.tmp.dir in system properties and HBase conf
2024-11-23T06:34:55,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/hadoop.log.dir in system properties and HBase conf
2024-11-23T06:34:55,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-23T06:34:55,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-23T06:34:55,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-23T06:34:55,834 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-23T06:34:55,952 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-23T06:34:55,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-23T06:34:55,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-23T06:34:55,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-23T06:34:55,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-23T06:34:55,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-23T06:34:55,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-23T06:34:55,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-23T06:34:55,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-23T06:34:55,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-23T06:34:55,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/nfs.dump.dir in system properties and HBase conf
2024-11-23T06:34:55,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/java.io.tmpdir in system properties and HBase conf
2024-11-23T06:34:55,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-23T06:34:55,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-23T06:34:55,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-23T06:34:56,430 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-23T06:34:57,032 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-23T06:34:57,103 INFO [Time-limited test {}] log.Log(170): Logging initialized @3099ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-23T06:34:57,168 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-23T06:34:57,228 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-23T06:34:57,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-23T06:34:57,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-23T06:34:57,247 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-23T06:34:57,258 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-23T06:34:57,260 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/hadoop.log.dir/,AVAILABLE}
2024-11-23T06:34:57,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-23T06:34:57,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/java.io.tmpdir/jetty-localhost-37521-hadoop-hdfs-3_4_1-tests_jar-_-any-6009762047257548942/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-23T06:34:57,443 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:37521}
2024-11-23T06:34:57,444 INFO [Time-limited test {}] server.Server(415): Started @3440ms
2024-11-23T06:34:57,470 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-23T06:34:58,018 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-23T06:34:58,028 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-23T06:34:58,030 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-23T06:34:58,030 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-23T06:34:58,030 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-23T06:34:58,031 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/hadoop.log.dir/,AVAILABLE}
2024-11-23T06:34:58,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-23T06:34:58,142 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/java.io.tmpdir/jetty-localhost-39061-hadoop-hdfs-3_4_1-tests_jar-_-any-17098895138547619345/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T06:34:58,143 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:39061}
2024-11-23T06:34:58,143 INFO [Time-limited test {}] server.Server(415): Started @4140ms
2024-11-23T06:34:58,196 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-23T06:34:58,302 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-23T06:34:58,307 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-23T06:34:58,309 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-23T06:34:58,309 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-23T06:34:58,309 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-23T06:34:58,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/hadoop.log.dir/,AVAILABLE}
2024-11-23T06:34:58,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-23T06:34:58,415 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/java.io.tmpdir/jetty-localhost-41141-hadoop-hdfs-3_4_1-tests_jar-_-any-4906460946028579628/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T06:34:58,416 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:41141}
2024-11-23T06:34:58,416 INFO [Time-limited test {}] server.Server(415): Started @4413ms
2024-11-23T06:34:58,418 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-23T06:34:59,528 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/data/data1/current/BP-1989195401-172.17.0.3-1732343696509/current, will proceed with Du for space computation calculation,
2024-11-23T06:34:59,528 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/data/data2/current/BP-1989195401-172.17.0.3-1732343696509/current, will proceed with Du for space computation calculation,
2024-11-23T06:34:59,528 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/data/data3/current/BP-1989195401-172.17.0.3-1732343696509/current, will proceed with Du for space computation calculation,
2024-11-23T06:34:59,528 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/data/data4/current/BP-1989195401-172.17.0.3-1732343696509/current, will proceed with Du for space computation calculation,
2024-11-23T06:34:59,558 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-23T06:34:59,558 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-23T06:34:59,603 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc401ba6a056fa50 with lease ID 0x855541bd412b81de: Processing first storage report for DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc from datanode DatanodeRegistration(127.0.0.1:46207, datanodeUuid=5cb5e92e-71c0-46f3-9564-3af36a225197, infoPort=39903, infoSecurePort=0, ipcPort=41857, storageInfo=lv=-57;cid=testClusterID;nsid=1234592179;c=1732343696509)
2024-11-23T06:34:59,604 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc401ba6a056fa50 with lease ID 0x855541bd412b81de: from storage DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc node DatanodeRegistration(127.0.0.1:46207, datanodeUuid=5cb5e92e-71c0-46f3-9564-3af36a225197, infoPort=39903, infoSecurePort=0, ipcPort=41857, storageInfo=lv=-57;cid=testClusterID;nsid=1234592179;c=1732343696509), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-23T06:34:59,604 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53dff430342a1bf6 with lease ID 0x855541bd412b81dd: Processing first storage report for DS-2ceba544-63fe-4162-9ba1-c26b895e13ac from datanode DatanodeRegistration(127.0.0.1:32975, datanodeUuid=f2d12bfb-3015-4b89-9b39-ca97c3559cb6, infoPort=38445, infoSecurePort=0, ipcPort=33389, storageInfo=lv=-57;cid=testClusterID;nsid=1234592179;c=1732343696509)
2024-11-23T06:34:59,604 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53dff430342a1bf6 with lease ID 0x855541bd412b81dd: from storage DS-2ceba544-63fe-4162-9ba1-c26b895e13ac node DatanodeRegistration(127.0.0.1:32975, datanodeUuid=f2d12bfb-3015-4b89-9b39-ca97c3559cb6, infoPort=38445, infoSecurePort=0, ipcPort=33389, storageInfo=lv=-57;cid=testClusterID;nsid=1234592179;c=1732343696509), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-23T06:34:59,605 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc401ba6a056fa50 with lease ID 0x855541bd412b81de: Processing first storage report for DS-f89665f4-08ab-4034-8eb3-6383dd88e311 from datanode DatanodeRegistration(127.0.0.1:46207, datanodeUuid=5cb5e92e-71c0-46f3-9564-3af36a225197, infoPort=39903, infoSecurePort=0, ipcPort=41857, storageInfo=lv=-57;cid=testClusterID;nsid=1234592179;c=1732343696509)
2024-11-23T06:34:59,605 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc401ba6a056fa50 with lease ID 0x855541bd412b81de: from storage DS-f89665f4-08ab-4034-8eb3-6383dd88e311 node DatanodeRegistration(127.0.0.1:46207, datanodeUuid=5cb5e92e-71c0-46f3-9564-3af36a225197, infoPort=39903, infoSecurePort=0, ipcPort=41857, storageInfo=lv=-57;cid=testClusterID;nsid=1234592179;c=1732343696509), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-23T06:34:59,605 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53dff430342a1bf6 with lease ID 0x855541bd412b81dd: Processing first storage report for DS-5974ef6f-18fd-42f0-b35e-97692eed74f1 from datanode DatanodeRegistration(127.0.0.1:32975, datanodeUuid=f2d12bfb-3015-4b89-9b39-ca97c3559cb6, infoPort=38445, infoSecurePort=0, ipcPort=33389, storageInfo=lv=-57;cid=testClusterID;nsid=1234592179;c=1732343696509)
2024-11-23T06:34:59,605 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53dff430342a1bf6 with lease ID 0x855541bd412b81dd: from storage DS-5974ef6f-18fd-42f0-b35e-97692eed74f1 node DatanodeRegistration(127.0.0.1:32975, datanodeUuid=f2d12bfb-3015-4b89-9b39-ca97c3559cb6, infoPort=38445, infoSecurePort=0, ipcPort=33389, storageInfo=lv=-57;cid=testClusterID;nsid=1234592179;c=1732343696509), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-23T06:34:59,648 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9
2024-11-23T06:34:59,707 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/zookeeper_0, clientPort=51410, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-23T06:34:59,715 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51410
2024-11-23T06:34:59,724 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T06:34:59,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T06:34:59,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741825_1001 (size=7)
2024-11-23T06:34:59,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741825_1001 (size=7)
2024-11-23T06:35:00,325 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958 with version=8
2024-11-23T06:35:00,326 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/hbase-staging
2024-11-23T06:35:00,405 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-23T06:35:00,676 INFO [Time-limited test {}] client.ConnectionUtils(128): master/df2f15951535:0 server-side Connection retries=45
2024-11-23T06:35:00,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-23T06:35:00,688 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-23T06:35:00,695 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-23T06:35:00,695 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-23T06:35:00,695 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-23T06:35:00,853 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-23T06:35:00,910 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-23T06:35:00,920 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-23T06:35:00,924 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-23T06:35:00,950 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 97267 (auto-detected)
2024-11-23T06:35:00,951 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected)
2024-11-23T06:35:00,974 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44363
2024-11-23T06:35:01,008 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44363 connecting to ZooKeeper ensemble=127.0.0.1:51410
2024-11-23T06:35:01,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:443630x0, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-23T06:35:01,162 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44363-0x101666714980000 connected
2024-11-23T06:35:01,250 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T06:35:01,255 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T06:35:01,267 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-23T06:35:01,271 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958, hbase.cluster.distributed=false
2024-11-23T06:35:01,295 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-23T06:35:01,299 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44363
2024-11-23T06:35:01,300 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44363
2024-11-23T06:35:01,300 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44363
2024-11-23T06:35:01,301 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44363
2024-11-23T06:35:01,301 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44363
2024-11-23T06:35:01,405 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/df2f15951535:0 server-side Connection retries=45
2024-11-23T06:35:01,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-23T06:35:01,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-23T06:35:01,407 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-23T06:35:01,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-23T06:35:01,408 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-23T06:35:01,410 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-23T06:35:01,414 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-23T06:35:01,415 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46791
2024-11-23T06:35:01,418 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46791 connecting to ZooKeeper ensemble=127.0.0.1:51410
2024-11-23T06:35:01,419 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T06:35:01,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T06:35:01,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467910x0, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-23T06:35:01,448 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-23T06:35:01,448 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46791-0x101666714980001 connected
2024-11-23T06:35:01,455 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-23T06:35:01,464 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-23T06:35:01,467 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-23T06:35:01,473 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-23T06:35:01,474 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46791
2024-11-23T06:35:01,474 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46791
2024-11-23T06:35:01,475 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46791
2024-11-23T06:35:01,478 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46791
2024-11-23T06:35:01,478 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46791
2024-11-23T06:35:01,500 DEBUG [M:0;df2f15951535:44363 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;df2f15951535:44363
2024-11-23T06:35:01,501 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/df2f15951535,44363,1732343700498
2024-11-23T06:35:01,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-23T06:35:01,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-23T06:35:01,513 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/df2f15951535,44363,1732343700498
2024-11-23T06:35:01,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-23T06:35:01,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T06:35:01,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T06:35:01,543 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-23T06:35:01,544 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/df2f15951535,44363,1732343700498 from backup master directory
2024-11-23T06:35:01,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/df2f15951535,44363,1732343700498
2024-11-23T06:35:01,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-23T06:35:01,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-23T06:35:01,553 WARN [master/df2f15951535:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-23T06:35:01,553 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=df2f15951535,44363,1732343700498
2024-11-23T06:35:01,555 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-23T06:35:01,556 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-23T06:35:01,628 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/hbase.id] with ID: 8bb9cd0a-e56f-4f92-b8e4-4dc2bbabff6f
2024-11-23T06:35:01,629 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/.tmp/hbase.id
2024-11-23T06:35:01,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741826_1002 (size=42)
2024-11-23T06:35:01,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741826_1002 (size=42)
2024-11-23T06:35:01,650 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/.tmp/hbase.id]:[hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/hbase.id]
2024-11-23T06:35:01,710 INFO [master/df2f15951535:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T06:35:01,715 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-23T06:35:01,737 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 20ms.
2024-11-23T06:35:01,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T06:35:01,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T06:35:01,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741827_1003 (size=196)
2024-11-23T06:35:01,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741827_1003 (size=196)
2024-11-23T06:35:01,800 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-23T06:35:01,803 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-23T06:35:01,818 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-23T06:35:01,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741828_1004 (size=1189)
2024-11-23T06:35:01,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741828_1004 (size=1189)
2024-11-23T06:35:01,887 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store
2024-11-23T06:35:01,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741829_1005 (size=34)
2024-11-23T06:35:01,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741829_1005 (size=34)
2024-11-23T06:35:02,316 INFO [master/df2f15951535:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-23T06:35:02,318 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-23T06:35:02,319 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-23T06:35:02,320 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T06:35:02,320 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T06:35:02,321 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-23T06:35:02,321 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T06:35:02,321 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T06:35:02,322 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343702319Disabling compacts and flushes for region at 1732343702319Disabling writes for close at 1732343702321 (+2 ms)Writing region close event to WAL at 1732343702321Closed at 1732343702321 2024-11-23T06:35:02,324 WARN [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/.initializing 2024-11-23T06:35:02,325 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/WALs/df2f15951535,44363,1732343700498 2024-11-23T06:35:02,352 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C44363%2C1732343700498, suffix=, logDir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/WALs/df2f15951535,44363,1732343700498, archiveDir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/oldWALs, maxLogs=10 2024-11-23T06:35:02,365 INFO [master/df2f15951535:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C44363%2C1732343700498.1732343702359 2024-11-23T06:35:02,396 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/WALs/df2f15951535,44363,1732343700498/df2f15951535%2C44363%2C1732343700498.1732343702359 2024-11-23T06:35:02,409 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39903:39903),(127.0.0.1/127.0.0.1:38445:38445)] 2024-11-23T06:35:02,411 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:35:02,412 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:35:02,416 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,417 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T06:35:02,497 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:02,500 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:35:02,500 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T06:35:02,504 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:02,505 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:35:02,505 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,508 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T06:35:02,508 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:02,510 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:35:02,510 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,513 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T06:35:02,513 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:02,514 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:35:02,515 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,519 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,520 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,526 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,527 DEBUG [master/df2f15951535:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,530 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T06:35:02,533 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:35:02,538 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:35:02,539 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868752, jitterRate=0.10467644035816193}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T06:35:02,546 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732343702435Initializing all the Stores at 1732343702438 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343702439 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343702440 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343702440Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343702440Cleaning up temporary data from old regions at 1732343702527 (+87 ms)Region opened successfully at 1732343702546 (+19 ms) 2024-11-23T06:35:02,548 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T06:35:02,579 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1de50964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:35:02,605 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T06:35:02,614 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T06:35:02,614 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T06:35:02,616 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T06:35:02,618 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-23T06:35:02,622 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-11-23T06:35:02,622 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T06:35:02,645 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T06:35:02,653 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T06:35:02,784 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T06:35:02,786 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T06:35:02,788 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T06:35:02,804 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T06:35:02,807 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T06:35:02,811 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T06:35:02,825 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T06:35:02,827 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T06:35:02,836 
DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T06:35:02,853 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T06:35:02,857 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T06:35:02,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:35:02,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:35:02,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:35:02,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:35:02,870 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=df2f15951535,44363,1732343700498, sessionid=0x101666714980000, setting cluster-up flag (Was=false) 2024-11-23T06:35:02,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:35:02,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:35:02,931 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T06:35:02,933 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,44363,1732343700498 2024-11-23T06:35:02,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:35:02,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:35:02,983 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T06:35:02,985 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,44363,1732343700498 2024-11-23T06:35:02,993 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T06:35:03,079 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T06:35:03,084 INFO [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(746): ClusterId : 8bb9cd0a-e56f-4f92-b8e4-4dc2bbabff6f 2024-11-23T06:35:03,086 DEBUG [RS:0;df2f15951535:46791 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T06:35:03,091 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T06:35:03,100 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T06:35:03,102 DEBUG [RS:0;df2f15951535:46791 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T06:35:03,102 DEBUG [RS:0;df2f15951535:46791 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T06:35:03,106 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: df2f15951535,44363,1732343700498 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T06:35:03,111 DEBUG [RS:0;df2f15951535:46791 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T06:35:03,112 DEBUG [RS:0;df2f15951535:46791 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23e399c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:35:03,115 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:35:03,115 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:35:03,115 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, 
maxPoolSize=5 2024-11-23T06:35:03,116 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:35:03,116 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/df2f15951535:0, corePoolSize=10, maxPoolSize=10 2024-11-23T06:35:03,116 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,116 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:35:03,116 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,122 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732343733122 2024-11-23T06:35:03,124 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T06:35:03,125 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T06:35:03,125 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:35:03,126 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T06:35:03,129 DEBUG [RS:0;df2f15951535:46791 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;df2f15951535:46791 2024-11-23T06:35:03,129 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T06:35:03,129 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T06:35:03,129 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T06:35:03,129 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T06:35:03,130 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,132 INFO [RS:0;df2f15951535:46791 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T06:35:03,132 INFO [RS:0;df2f15951535:46791 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T06:35:03,132 DEBUG [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T06:35:03,133 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:03,134 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T06:35:03,133 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T06:35:03,135 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T06:35:03,135 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T06:35:03,136 INFO [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(2659): reportForDuty to master=df2f15951535,44363,1732343700498 with port=46791, startcode=1732343701363 2024-11-23T06:35:03,137 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T06:35:03,137 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T06:35:03,139 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343703139,5,FailOnTimeoutGroup] 2024-11-23T06:35:03,140 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343703139,5,FailOnTimeoutGroup] 2024-11-23T06:35:03,140 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
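The hbase:meta table descriptor created above lists column family attributes such as VERSIONS => '3', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and BLOCKSIZE => '8192 B (8KB)'. A hedged sketch of how those attributes map onto the public client builder API when constructing a similar family descriptor; this is an illustration only, not the internal code path the master uses to build the meta descriptor.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes printed in the descriptor above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
                .build();
            System.out.println(info);
        }
    }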
2024-11-23T06:35:03,140 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T06:35:03,142 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,142 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:35:03,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:35:03,148 DEBUG [RS:0;df2f15951535:46791 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T06:35:03,149 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T06:35:03,150 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958 2024-11-23T06:35:03,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:35:03,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741832_1008 (size=32) 
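The HMaster(1741) entry above notes that reopening regions with a very high storeFileRefCount is disabled and that hbase.regions.recovery.store.file.ref.count must be set greater than 0 to enable it; an earlier SimpleRegionNormalizer entry likewise records hbase.normalizer.merge.min_region_size.mb being raised from 0 to 1. A small hedged sketch of setting those thresholds in a Configuration; the ref-count threshold of 3 is an illustrative value, not something this log prescribes.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterRecoveryThresholdSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // The log states any value > 0 enables reopening of regions with very high
            // store file reference counts; 3 is an arbitrary example threshold.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);

            // The normalizer entry above shows this key being updated from 0 to 1 (MB).
            conf.setInt("hbase.normalizer.merge.min_region_size.mb", 1);

            System.out.println("ref count threshold = "
                + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
        }
    }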
2024-11-23T06:35:03,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:35:03,166 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:35:03,169 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:35:03,170 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:03,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:35:03,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:35:03,174 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:35:03,174 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:03,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:35:03,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:35:03,179 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:35:03,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:03,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:35:03,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:35:03,185 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:35:03,185 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:03,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:35:03,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:35:03,189 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740 2024-11-23T06:35:03,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740 2024-11-23T06:35:03,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-11-23T06:35:03,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:35:03,197 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T06:35:03,199 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:35:03,205 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:35:03,206 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801971, jitterRate=0.01975969970226288}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:35:03,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732343703164Initializing all the Stores at 1732343703166 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343703166Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343703166Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343703166Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343703166Cleaning up temporary data from old regions at 1732343703195 (+29 ms)Region opened successfully at 1732343703210 (+15 ms) 2024-11-23T06:35:03,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:35:03,211 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:35:03,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:35:03,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:35:03,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-11-23T06:35:03,212 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:35:03,212 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343703211Disabling compacts and flushes for region at 1732343703211Disabling writes for close at 1732343703211Writing region close event to WAL at 1732343703212 (+1 ms)Closed at 1732343703212 2024-11-23T06:35:03,216 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:35:03,216 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T06:35:03,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T06:35:03,226 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42357, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T06:35:03,232 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:35:03,233 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44363 {}] master.ServerManager(363): Checking decommissioned status of RegionServer df2f15951535,46791,1732343701363 2024-11-23T06:35:03,235 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T06:35:03,239 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44363 {}] master.ServerManager(517): Registering regionserver=df2f15951535,46791,1732343701363 2024-11-23T06:35:03,253 DEBUG [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958 2024-11-23T06:35:03,253 DEBUG [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43385 2024-11-23T06:35:03,253 DEBUG [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T06:35:03,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:35:03,268 DEBUG [RS:0;df2f15951535:46791 {}] zookeeper.ZKUtil(111): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/df2f15951535,46791,1732343701363 2024-11-23T06:35:03,268 WARN [RS:0;df2f15951535:46791 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T06:35:03,268 INFO [RS:0;df2f15951535:46791 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:35:03,269 DEBUG [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363 2024-11-23T06:35:03,270 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [df2f15951535,46791,1732343701363] 2024-11-23T06:35:03,291 INFO [RS:0;df2f15951535:46791 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T06:35:03,305 INFO [RS:0;df2f15951535:46791 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T06:35:03,309 INFO [RS:0;df2f15951535:46791 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T06:35:03,309 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,310 INFO [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T06:35:03,315 INFO [RS:0;df2f15951535:46791 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T06:35:03,316 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,316 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,317 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,317 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,317 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,317 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,317 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:35:03,317 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,317 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,318 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/df2f15951535:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T06:35:03,318 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,318 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,318 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:35:03,318 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:35:03,318 DEBUG [RS:0;df2f15951535:46791 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:35:03,319 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,319 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,319 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,320 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,320 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,320 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,46791,1732343701363-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:35:03,335 INFO [RS:0;df2f15951535:46791 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T06:35:03,337 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,46791,1732343701363-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,337 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:03,337 INFO [RS:0;df2f15951535:46791 {}] regionserver.Replication(171): df2f15951535,46791,1732343701363 started 2024-11-23T06:35:03,357 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:35:03,357 INFO [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(1482): Serving as df2f15951535,46791,1732343701363, RpcServer on df2f15951535/172.17.0.3:46791, sessionid=0x101666714980001 2024-11-23T06:35:03,358 DEBUG [RS:0;df2f15951535:46791 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T06:35:03,358 DEBUG [RS:0;df2f15951535:46791 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager df2f15951535,46791,1732343701363 2024-11-23T06:35:03,359 DEBUG [RS:0;df2f15951535:46791 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,46791,1732343701363' 2024-11-23T06:35:03,359 DEBUG [RS:0;df2f15951535:46791 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T06:35:03,360 DEBUG [RS:0;df2f15951535:46791 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T06:35:03,361 DEBUG [RS:0;df2f15951535:46791 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T06:35:03,361 DEBUG [RS:0;df2f15951535:46791 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T06:35:03,361 DEBUG [RS:0;df2f15951535:46791 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager df2f15951535,46791,1732343701363 2024-11-23T06:35:03,361 DEBUG [RS:0;df2f15951535:46791 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,46791,1732343701363' 2024-11-23T06:35:03,361 DEBUG [RS:0;df2f15951535:46791 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T06:35:03,362 DEBUG [RS:0;df2f15951535:46791 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T06:35:03,362 DEBUG [RS:0;df2f15951535:46791 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T06:35:03,363 INFO [RS:0;df2f15951535:46791 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T06:35:03,363 INFO [RS:0;df2f15951535:46791 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T06:35:03,387 WARN [df2f15951535:44363 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
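The RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager entries above report quota support disabled. The log does not name the controlling property; to my recollection it is hbase.quota.enabled, so the key in the sketch below should be treated as an assumption rather than something this log confirms.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotasSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // ASSUMPTION: "hbase.quota.enabled" is the switch behind the
            // "Quota support disabled" messages above; the property is not named in this log.
            conf.setBoolean("hbase.quota.enabled", true);

            System.out.println("quotas enabled = " + conf.getBoolean("hbase.quota.enabled", false));
        }
    }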
2024-11-23T06:35:03,469 INFO [RS:0;df2f15951535:46791 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C46791%2C1732343701363, suffix=, logDir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363, archiveDir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/oldWALs, maxLogs=32 2024-11-23T06:35:03,472 INFO [RS:0;df2f15951535:46791 {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C46791%2C1732343701363.1732343703472 2024-11-23T06:35:03,484 INFO [RS:0;df2f15951535:46791 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343703472 2024-11-23T06:35:03,489 DEBUG [RS:0;df2f15951535:46791 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39903:39903),(127.0.0.1/127.0.0.1:38445:38445)] 2024-11-23T06:35:03,639 DEBUG [df2f15951535:44363 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T06:35:03,649 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=df2f15951535,46791,1732343701363 2024-11-23T06:35:03,656 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,46791,1732343701363, state=OPENING 2024-11-23T06:35:03,667 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T06:35:03,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:35:03,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:35:03,679 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:35:03,679 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:35:03,680 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:35:03,682 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,46791,1732343701363}] 2024-11-23T06:35:03,858 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T06:35:03,861 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52451, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T06:35:03,872 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T06:35:03,872 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:35:03,875 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C46791%2C1732343701363.meta, suffix=.meta, logDir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363, archiveDir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/oldWALs, maxLogs=32 2024-11-23T06:35:03,878 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C46791%2C1732343701363.meta.1732343703877.meta 2024-11-23T06:35:03,885 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.meta.1732343703877.meta 2024-11-23T06:35:03,888 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39903:39903),(127.0.0.1/127.0.0.1:38445:38445)] 2024-11-23T06:35:03,889 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:35:03,891 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T06:35:03,893 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T06:35:03,898 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
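[editorial sketch] The WAL configuration entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, one FSHLog for regular edits and one for hbase:meta) are driven by region-server configuration rather than by anything table-specific. The snippet below is a minimal, hedged illustration of how such values could be set on a client/test Configuration; the property names (hlog blocksize, logroll multiplier, maxlogs) are my recollection of the standard HBase keys and should be treated as assumptions, not values read out of this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalTuningSketch {
        // Returns a Configuration carrying WAL sizing similar to what this log reports.
        static Configuration walTuning() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // 256 MB blocks
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * multiplier
            conf.setInt("hbase.regionserver.maxlogs", 32);                          // cap on un-archived WAL files
            return conf;
        }
    }

With these (assumed) keys, the 128 MB rollsize in the log line falls out of blocksize * multiplier rather than being set directly.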
2024-11-23T06:35:03,903 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T06:35:03,904 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:35:03,904 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T06:35:03,904 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T06:35:03,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:35:03,910 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:35:03,910 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:03,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:35:03,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:35:03,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:35:03,913 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:03,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:35:03,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:35:03,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:35:03,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:03,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:35:03,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:35:03,919 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:35:03,920 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:03,921 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
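[editorial sketch] The store-open lines above (ROW_INDEX_V1 encoding, DefaultMemStore, DefaultStoreFileTracker for each of info/ns/rep_barrier/table), together with the full family schema echoed in the region-open journal just below (ROWCOL bloom filter, IN_MEMORY true, 8 KB blocks for 'info'), correspond to settings that are expressed through the public ColumnFamilyDescriptorBuilder API. A minimal sketch of an 'info'-like family follows; it is an illustration of the API, not code taken from HBase or from this test.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
        // Builds a column family resembling the hbase:meta 'info' settings seen in this log.
        static ColumnFamilyDescriptor infoLikeFamily() {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)                          // 8 KB blocks
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build();
        }
    }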
2024-11-23T06:35:03,921 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:35:03,923 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740 2024-11-23T06:35:03,926 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740 2024-11-23T06:35:03,929 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:35:03,929 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:35:03,930 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T06:35:03,934 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:35:03,936 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805216, jitterRate=0.023885279893875122}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:35:03,936 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T06:35:03,938 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732343703905Writing region info on filesystem at 1732343703905Initializing all the Stores at 1732343703907 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343703907Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343703908 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343703908Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343703908Cleaning up temporary data from old regions at 1732343703929 (+21 ms)Running coprocessor post-open hooks at 1732343703936 (+7 ms)Region opened successfully at 1732343703938 (+2 ms) 2024-11-23T06:35:03,946 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732343703848 2024-11-23T06:35:03,959 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T06:35:03,960 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T06:35:03,961 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,46791,1732343701363 2024-11-23T06:35:03,964 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,46791,1732343701363, state=OPEN 2024-11-23T06:35:04,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:35:04,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:35:04,007 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:35:04,007 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:35:04,007 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=df2f15951535,46791,1732343701363 2024-11-23T06:35:04,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T06:35:04,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,46791,1732343701363 in 326 msec 2024-11-23T06:35:04,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T06:35:04,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 794 msec 2024-11-23T06:35:04,022 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:35:04,023 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T06:35:04,041 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:35:04,042 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,46791,1732343701363, seqNum=-1] 2024-11-23T06:35:04,065 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:35:04,067 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60911, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:35:04,088 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0620 sec 2024-11-23T06:35:04,088 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732343704088, completionTime=-1 2024-11-23T06:35:04,091 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T06:35:04,091 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T06:35:04,116 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T06:35:04,116 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732343764116 2024-11-23T06:35:04,116 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732343824116 2024-11-23T06:35:04,116 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-11-23T06:35:04,120 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44363,1732343700498-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:04,120 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44363,1732343700498-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:04,120 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44363,1732343700498-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:04,122 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-df2f15951535:44363, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:35:04,122 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:04,123 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T06:35:04,130 DEBUG [master/df2f15951535:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T06:35:04,151 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.598sec 2024-11-23T06:35:04,152 INFO [master/df2f15951535:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T06:35:04,153 INFO [master/df2f15951535:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T06:35:04,154 INFO [master/df2f15951535:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T06:35:04,155 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T06:35:04,155 INFO [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T06:35:04,155 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44363,1732343700498-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:35:04,156 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44363,1732343700498-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T06:35:04,163 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T06:35:04,164 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T06:35:04,164 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44363,1732343700498-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:35:04,193 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33c17658, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:35:04,195 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-23T06:35:04,195 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-23T06:35:04,198 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request df2f15951535,44363,-1 for getting cluster id 2024-11-23T06:35:04,201 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T06:35:04,209 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8bb9cd0a-e56f-4f92-b8e4-4dc2bbabff6f' 2024-11-23T06:35:04,213 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T06:35:04,213 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8bb9cd0a-e56f-4f92-b8e4-4dc2bbabff6f" 2024-11-23T06:35:04,217 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29842d21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:35:04,217 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [df2f15951535,44363,-1] 2024-11-23T06:35:04,221 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T06:35:04,224 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:35:04,225 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39488, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T06:35:04,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35d9e869, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:35:04,229 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:35:04,239 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,46791,1732343701363, seqNum=-1] 2024-11-23T06:35:04,246 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:35:04,249 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34154, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:35:04,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; 
activeMaster=df2f15951535,44363,1732343700498 2024-11-23T06:35:04,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:35:04,285 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T06:35:04,291 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T06:35:04,297 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is df2f15951535,44363,1732343700498 2024-11-23T06:35:04,300 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5577e14 2024-11-23T06:35:04,301 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T06:35:04,304 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39496, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T06:35:04,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44363 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-23T06:35:04,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44363 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
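[editorial sketch] The two TableDescriptorChecker warnings above are expected here: the table about to be created overrides MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) with deliberately tiny values so that flushes and log rolling happen quickly during the test. A comparable table could be created from any client roughly as sketched below; the connection setup is assumed, and the limits are copied from the warnings rather than from the test source.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateRollingTestTable {
        public static void main(String[] args) throws Exception {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)
                    .build())
                .setMaxFileSize(786432L)        // deliberately tiny to force early splits
                .setMemStoreFlushSize(8192L)    // deliberately tiny to force frequent flushes
                .build();
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(td);
            }
        }
    }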
2024-11-23T06:35:04,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44363 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:35:04,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44363 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-23T06:35:04,327 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T06:35:04,329 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44363 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-23T06:35:04,329 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:04,352 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T06:35:04,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44363 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T06:35:04,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741835_1011 (size=389) 2024-11-23T06:35:04,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741835_1011 (size=389) 2024-11-23T06:35:04,412 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5a377456a28ce5815f6ef08b53b3c4e8, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958 2024-11-23T06:35:04,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741836_1012 (size=72) 2024-11-23T06:35:04,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741836_1012 (size=72) 2024-11-23T06:35:04,426 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:35:04,426 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 5a377456a28ce5815f6ef08b53b3c4e8, disabling compactions & flushes 2024-11-23T06:35:04,426 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 2024-11-23T06:35:04,426 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 2024-11-23T06:35:04,426 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. after waiting 0 ms 2024-11-23T06:35:04,426 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 2024-11-23T06:35:04,426 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 2024-11-23T06:35:04,426 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5a377456a28ce5815f6ef08b53b3c4e8: Waiting for close lock at 1732343704426Disabling compacts and flushes for region at 1732343704426Disabling writes for close at 1732343704426Writing region close event to WAL at 1732343704426Closed at 1732343704426 2024-11-23T06:35:04,429 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T06:35:04,433 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732343704429"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732343704429"}]},"ts":"1732343704429"} 2024-11-23T06:35:04,439 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-23T06:35:04,441 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T06:35:04,444 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732343704441"}]},"ts":"1732343704441"} 2024-11-23T06:35:04,449 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-23T06:35:04,451 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5a377456a28ce5815f6ef08b53b3c4e8, ASSIGN}] 2024-11-23T06:35:04,453 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5a377456a28ce5815f6ef08b53b3c4e8, ASSIGN 2024-11-23T06:35:04,455 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5a377456a28ce5815f6ef08b53b3c4e8, ASSIGN; state=OFFLINE, location=df2f15951535,46791,1732343701363; forceNewPlan=false, retain=false 2024-11-23T06:35:04,608 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5a377456a28ce5815f6ef08b53b3c4e8, regionState=OPENING, regionLocation=df2f15951535,46791,1732343701363 2024-11-23T06:35:04,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5a377456a28ce5815f6ef08b53b3c4e8, ASSIGN because future has completed 2024-11-23T06:35:04,617 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a377456a28ce5815f6ef08b53b3c4e8, server=df2f15951535,46791,1732343701363}] 2024-11-23T06:35:04,778 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 
2024-11-23T06:35:04,779 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5a377456a28ce5815f6ef08b53b3c4e8, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:35:04,779 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,779 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:35:04,779 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,780 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,782 INFO [StoreOpener-5a377456a28ce5815f6ef08b53b3c4e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,785 INFO [StoreOpener-5a377456a28ce5815f6ef08b53b3c4e8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a377456a28ce5815f6ef08b53b3c4e8 columnFamilyName info 2024-11-23T06:35:04,785 DEBUG [StoreOpener-5a377456a28ce5815f6ef08b53b3c4e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:35:04,787 INFO [StoreOpener-5a377456a28ce5815f6ef08b53b3c4e8-1 {}] regionserver.HStore(327): Store=5a377456a28ce5815f6ef08b53b3c4e8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:35:04,787 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,789 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,790 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,791 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,791 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,793 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,797 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:35:04,799 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5a377456a28ce5815f6ef08b53b3c4e8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882507, jitterRate=0.12216687202453613}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T06:35:04,799 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:04,800 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5a377456a28ce5815f6ef08b53b3c4e8: Running coprocessor pre-open hook at 1732343704780Writing region info on filesystem at 1732343704780Initializing all the Stores at 1732343704781 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343704781Cleaning up temporary data from old regions at 1732343704791 (+10 ms)Running coprocessor post-open hooks at 1732343704799 (+8 ms)Region opened successfully at 1732343704800 (+1 ms) 2024-11-23T06:35:04,802 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8., pid=6, masterSystemTime=1732343704771 2024-11-23T06:35:04,807 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 2024-11-23T06:35:04,807 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 2024-11-23T06:35:04,808 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5a377456a28ce5815f6ef08b53b3c4e8, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,46791,1732343701363 2024-11-23T06:35:04,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a377456a28ce5815f6ef08b53b3c4e8, server=df2f15951535,46791,1732343701363 because future has completed 2024-11-23T06:35:04,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T06:35:04,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5a377456a28ce5815f6ef08b53b3c4e8, server=df2f15951535,46791,1732343701363 in 197 msec 2024-11-23T06:35:04,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T06:35:04,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=5a377456a28ce5815f6ef08b53b3c4e8, ASSIGN in 367 msec 2024-11-23T06:35:04,824 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T06:35:04,825 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732343704825"}]},"ts":"1732343704825"} 2024-11-23T06:35:04,829 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-23T06:35:04,831 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T06:35:04,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 515 msec 2024-11-23T06:35:09,422 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T06:35:09,484 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T06:35:09,485 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-23T06:35:10,908 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T06:35:10,909 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T06:35:10,912 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-23T06:35:10,913 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-23T06:35:10,914 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:35:10,914 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T06:35:10,914 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T06:35:10,914 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T06:35:14,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44363 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T06:35:14,411 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-23T06:35:14,415 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-23T06:35:14,423 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-23T06:35:14,423 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 
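[editorial sketch] The region-location lookup for row0001 just below, and the memstore flush that follows it, are the result of ordinary client writes against the newly created table. The flush report further down mentions 7 entries of roughly 1 KB each (dataSize ~7.36 KB), so the writes probably look something like the sketch below; the row/qualifier/value shapes are illustrative assumptions, and the Connection is assumed to be the one created in the earlier sketch.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteSomeRows {
        // Writes a handful of ~1 KB cells, enough to cross the 8192-byte flush size above.
        static void writeRows(Connection conn) throws Exception {
            byte[] family = Bytes.toBytes("info");
            byte[] value = new byte[1024];
            try (Table table = conn.getTable(
                    TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
                for (int i = 1; i <= 7; i++) {
                    Put put = new Put(Bytes.toBytes(String.format("row%04d", i))); // row0001, row0002, ...
                    put.addColumn(family, Bytes.toBytes("q"), value);
                    table.put(put);
                }
            }
        }
    }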
2024-11-23T06:35:14,424 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C46791%2C1732343701363.1732343714424 2024-11-23T06:35:14,492 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:14,492 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:14,492 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:14,492 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:14,493 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:14,493 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343703472 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343714424 2024-11-23T06:35:14,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741833_1009 (size=451) 2024-11-23T06:35:14,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741833_1009 (size=451) 2024-11-23T06:35:14,498 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38445:38445),(127.0.0.1/127.0.0.1:39903:39903)] 2024-11-23T06:35:14,498 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343703472 is not closed yet, will try archiving it next time 2024-11-23T06:35:14,499 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343703472 to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/oldWALs/df2f15951535%2C46791%2C1732343701363.1732343703472 2024-11-23T06:35:14,507 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8., hostname=df2f15951535,46791,1732343701363, seqNum=2] 2024-11-23T06:35:26,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46791 {}] regionserver.HRegion(8855): Flush requested on 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:26,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5a377456a28ce5815f6ef08b53b3c4e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T06:35:26,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/1c99a89bcd90452d9e334d09695d36b7 is 1080, key is row0001/info:/1732343714509/Put/seqid=0 2024-11-23T06:35:26,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741838_1014 (size=12509) 2024-11-23T06:35:26,643 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741838_1014 (size=12509) 2024-11-23T06:35:27,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/1c99a89bcd90452d9e334d09695d36b7 2024-11-23T06:35:27,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/1c99a89bcd90452d9e334d09695d36b7 as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/1c99a89bcd90452d9e334d09695d36b7 2024-11-23T06:35:27,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/1c99a89bcd90452d9e334d09695d36b7, entries=7, sequenceid=11, filesize=12.2 K 2024-11-23T06:35:27,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5a377456a28ce5815f6ef08b53b3c4e8 in 589ms, sequenceid=11, compaction requested=false 2024-11-23T06:35:27,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5a377456a28ce5815f6ef08b53b3c4e8: 2024-11-23T06:35:29,644 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T06:35:34,565 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C46791%2C1732343701363.1732343734565 2024-11-23T06:35:34,775 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:34,776 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:34,776 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:34,776 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:34,776 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:34,776 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:34,776 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343714424 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343734565 2024-11-23T06:35:34,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741837_1013 (size=12399) 2024-11-23T06:35:34,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741837_1013 (size=12399) 2024-11-23T06:35:34,783 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38445:38445),(127.0.0.1/127.0.0.1:39903:39903)] 2024-11-23T06:35:34,986 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:37,191 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:39,395 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:41,600 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:41,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46791 {}] 
regionserver.HRegion(8855): Flush requested on 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:35:41,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5a377456a28ce5815f6ef08b53b3c4e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T06:35:41,802 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:41,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/5f17495d6cf9420f8e5d5e7bd125d385 is 1080, key is row0008/info:/1732343728549/Put/seqid=0 2024-11-23T06:35:41,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741840_1016 (size=12509) 2024-11-23T06:35:41,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741840_1016 (size=12509) 2024-11-23T06:35:41,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/5f17495d6cf9420f8e5d5e7bd125d385 2024-11-23T06:35:41,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/5f17495d6cf9420f8e5d5e7bd125d385 as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/5f17495d6cf9420f8e5d5e7bd125d385 2024-11-23T06:35:41,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/5f17495d6cf9420f8e5d5e7bd125d385, entries=7, sequenceid=21, filesize=12.2 K 2024-11-23T06:35:42,095 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:42,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5a377456a28ce5815f6ef08b53b3c4e8 in 495ms, sequenceid=21, compaction requested=false 2024-11-23T06:35:42,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5a377456a28ce5815f6ef08b53b3c4e8: 2024-11-23T06:35:42,096 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-23T06:35:42,096 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:35:42,097 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/1c99a89bcd90452d9e334d09695d36b7 because midkey is the same as first or last row 2024-11-23T06:35:43,804 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:44,194 INFO [master/df2f15951535:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T06:35:44,194 INFO [master/df2f15951535:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-23T06:35:46,008 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:46,010 WARN [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:46,010 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C46791%2C1732343701363:(num 1732343734565) roll requested 2024-11-23T06:35:46,011 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C46791%2C1732343701363.1732343746011 2024-11-23T06:35:46,224 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK], DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK]] 2024-11-23T06:35:46,225 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:46,225 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:46,225 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:46,225 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:46,225 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:35:46,225 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343734565 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343746011 2024-11-23T06:35:46,226 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39903:39903),(127.0.0.1/127.0.0.1:38445:38445)] 2024-11-23T06:35:46,227 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343734565 is not closed yet, will try archiving it next time 2024-11-23T06:35:46,227 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343714424 to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/oldWALs/df2f15951535%2C46791%2C1732343701363.1732343714424 2024-11-23T06:35:46,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741839_1015 (size=7739) 2024-11-23T06:35:46,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741839_1015 (size=7739) 2024-11-23T06:35:48,212 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:35:49,780 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5a377456a28ce5815f6ef08b53b3c4e8, had cached 0 bytes from a total of 25018 2024-11-23T06:35:50,416 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:35:52,622 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:35:54,830 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:35:56,835 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T06:35:56,836 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C46791%2C1732343701363.1732343756836 
2024-11-23T06:35:59,645 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T06:36:01,853 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:36:01,856 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:36:01,856 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C46791%2C1732343701363:(num 1732343756836) roll requested 2024-11-23T06:36:01,856 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:01,856 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:01,857 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:01,857 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:01,857 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:01,857 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343746011 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343756836 2024-11-23T06:36:01,858 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39903:39903),(127.0.0.1/127.0.0.1:38445:38445)] 2024-11-23T06:36:01,858 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343746011 is not closed yet, will try archiving it next time 2024-11-23T06:36:01,859 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C46791%2C1732343701363.1732343761859 2024-11-23T06:36:01,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741841_1017 (size=4753) 2024-11-23T06:36:01,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741841_1017 (size=4753) 2024-11-23T06:36:06,865 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5003 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:36:06,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46791 {}] regionserver.HRegion(8855): Flush requested on 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:36:06,865 WARN 
[FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5003 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:36:06,865 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5a377456a28ce5815f6ef08b53b3c4e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T06:36:06,873 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:36:06,873 WARN [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:36:08,866 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T06:36:11,867 INFO [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:36:11,867 WARN [FSHLog-0-hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958-prefix:df2f15951535,46791,1732343701363 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46207,DS-e1fdd1c0-56cc-44ab-8e4d-51ecc51f72cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32975,DS-2ceba544-63fe-4162-9ba1-c26b895e13ac,DISK]] 2024-11-23T06:36:11,868 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,868 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,868 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,868 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,868 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,869 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343756836 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343761859 2024-11-23T06:36:11,870 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38445:38445),(127.0.0.1/127.0.0.1:39903:39903)] 2024-11-23T06:36:11,870 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343756836 is not closed yet, will try archiving it next time 2024-11-23T06:36:11,870 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C46791%2C1732343701363:(num 1732343761859) roll requested 2024-11-23T06:36:11,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741842_1018 (size=1569) 2024-11-23T06:36:11,871 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C46791%2C1732343701363.1732343771870 2024-11-23T06:36:11,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741842_1018 (size=1569) 2024-11-23T06:36:11,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/7798b1ce63a8465fa22f87f10c51d32b is 1080, key is row0015/info:/1732343743602/Put/seqid=0 2024-11-23T06:36:11,886 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,886 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,886 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,886 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,887 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741845_1021 (size=12509) 2024-11-23T06:36:11,887 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343761859 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343771870 2024-11-23T06:36:11,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741845_1021 (size=12509) 2024-11-23T06:36:11,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741843_1019 (size=93) 2024-11-23T06:36:11,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741843_1019 (size=93) 2024-11-23T06:36:11,890 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343761859 to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/oldWALs/df2f15951535%2C46791%2C1732343701363.1732343761859 2024-11-23T06:36:11,896 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39903:39903),(127.0.0.1/127.0.0.1:38445:38445)] 2024-11-23T06:36:11,897 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
df2f15951535%2C46791%2C1732343701363.1732343771896 2024-11-23T06:36:11,910 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,910 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,910 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,910 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,910 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:11,911 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343771870 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/WALs/df2f15951535,46791,1732343701363/df2f15951535%2C46791%2C1732343701363.1732343771896 2024-11-23T06:36:11,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741844_1020 (size=1258) 2024-11-23T06:36:11,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741844_1020 (size=1258) 2024-11-23T06:36:11,921 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38445:38445),(127.0.0.1/127.0.0.1:39903:39903)] 2024-11-23T06:36:12,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/7798b1ce63a8465fa22f87f10c51d32b 2024-11-23T06:36:12,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/7798b1ce63a8465fa22f87f10c51d32b as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/7798b1ce63a8465fa22f87f10c51d32b 2024-11-23T06:36:12,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/7798b1ce63a8465fa22f87f10c51d32b, entries=7, sequenceid=31, filesize=12.2 K 2024-11-23T06:36:12,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for 5a377456a28ce5815f6ef08b53b3c4e8 in 5449ms, sequenceid=31, compaction requested=true 2024-11-23T06:36:12,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5a377456a28ce5815f6ef08b53b3c4e8: 2024-11-23T06:36:12,315 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-23T06:36:12,315 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:36:12,315 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/1c99a89bcd90452d9e334d09695d36b7 because midkey is the same as first or last row 2024-11-23T06:36:12,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a377456a28ce5815f6ef08b53b3c4e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:36:12,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:36:12,319 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:36:12,322 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:36:12,323 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.HStore(1541): 5a377456a28ce5815f6ef08b53b3c4e8/info is initiating minor compaction (all files) 2024-11-23T06:36:12,324 INFO [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5a377456a28ce5815f6ef08b53b3c4e8/info in TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 2024-11-23T06:36:12,324 INFO [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/1c99a89bcd90452d9e334d09695d36b7, hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/5f17495d6cf9420f8e5d5e7bd125d385, hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/7798b1ce63a8465fa22f87f10c51d32b] into tmpdir=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp, totalSize=36.6 K 2024-11-23T06:36:12,325 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1c99a89bcd90452d9e334d09695d36b7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732343714509 2024-11-23T06:36:12,326 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f17495d6cf9420f8e5d5e7bd125d385, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732343728549 2024-11-23T06:36:12,327 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7798b1ce63a8465fa22f87f10c51d32b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732343743602 2024-11-23T06:36:12,363 INFO [RS:0;df2f15951535:46791-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a377456a28ce5815f6ef08b53b3c4e8#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) 
and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:36:12,365 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/ab702c88d9e341c681d14c76b916ab45 is 1080, key is row0001/info:/1732343714509/Put/seqid=0 2024-11-23T06:36:12,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741847_1023 (size=27710) 2024-11-23T06:36:12,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741847_1023 (size=27710) 2024-11-23T06:36:12,793 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/ab702c88d9e341c681d14c76b916ab45 as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/ab702c88d9e341c681d14c76b916ab45 2024-11-23T06:36:12,817 INFO [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5a377456a28ce5815f6ef08b53b3c4e8/info of 5a377456a28ce5815f6ef08b53b3c4e8 into ab702c88d9e341c681d14c76b916ab45(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T06:36:12,818 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5a377456a28ce5815f6ef08b53b3c4e8: 2024-11-23T06:36:12,820 INFO [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8., storeName=5a377456a28ce5815f6ef08b53b3c4e8/info, priority=13, startTime=1732343772316; duration=0sec 2024-11-23T06:36:12,820 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-23T06:36:12,820 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:36:12,821 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/ab702c88d9e341c681d14c76b916ab45 because midkey is the same as first or last row 2024-11-23T06:36:12,821 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-23T06:36:12,821 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:36:12,821 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/ab702c88d9e341c681d14c76b916ab45 because midkey is the same as first or last row 2024-11-23T06:36:12,822 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-23T06:36:12,822 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:36:12,822 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/ab702c88d9e341c681d14c76b916ab45 because midkey is the same as first or last row 2024-11-23T06:36:12,822 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:36:12,822 DEBUG [RS:0;df2f15951535:46791-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a377456a28ce5815f6ef08b53b3c4e8:info 2024-11-23T06:36:23,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46791 {}] regionserver.HRegion(8855): Flush requested on 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:36:23,924 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5a377456a28ce5815f6ef08b53b3c4e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T06:36:23,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/b0e3908688734bb4b9878f7640b87aa7 is 1080, key is row0022/info:/1732343771898/Put/seqid=0 2024-11-23T06:36:23,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741848_1024 (size=12509) 2024-11-23T06:36:23,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741848_1024 (size=12509) 2024-11-23T06:36:23,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/b0e3908688734bb4b9878f7640b87aa7 2024-11-23T06:36:23,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/b0e3908688734bb4b9878f7640b87aa7 as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/b0e3908688734bb4b9878f7640b87aa7 2024-11-23T06:36:23,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/b0e3908688734bb4b9878f7640b87aa7, entries=7, sequenceid=42, filesize=12.2 K 2024-11-23T06:36:23,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5a377456a28ce5815f6ef08b53b3c4e8 in 36ms, sequenceid=42, compaction requested=false 2024-11-23T06:36:23,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5a377456a28ce5815f6ef08b53b3c4e8: 2024-11-23T06:36:23,961 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-23T06:36:23,961 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:36:23,961 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/ab702c88d9e341c681d14c76b916ab45 because midkey is the same as first or last row 2024-11-23T06:36:29,645 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T06:36:31,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T06:36:31,942 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T06:36:31,943 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:36:31,950 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:36:31,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:36:31,951 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T06:36:31,951 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T06:36:31,951 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1575943754, stopped=false 2024-11-23T06:36:31,952 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=df2f15951535,44363,1732343700498 2024-11-23T06:36:31,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:36:31,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:36:31,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:31,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:31,998 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:36:31,998 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T06:36:31,999 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:36:31,999 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:36:31,999 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:36:32,000 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:36:32,000 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'df2f15951535,46791,1732343701363' ***** 2024-11-23T06:36:32,000 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T06:36:32,001 INFO [RS:0;df2f15951535:46791 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T06:36:32,002 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T06:36:32,002 INFO [RS:0;df2f15951535:46791 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T06:36:32,002 INFO [RS:0;df2f15951535:46791 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T06:36:32,002 INFO [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(3091): Received CLOSE for 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:36:32,003 INFO [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(959): stopping server df2f15951535,46791,1732343701363 2024-11-23T06:36:32,003 INFO [RS:0;df2f15951535:46791 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:36:32,003 INFO [RS:0;df2f15951535:46791 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;df2f15951535:46791. 
2024-11-23T06:36:32,003 DEBUG [RS:0;df2f15951535:46791 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:36:32,003 DEBUG [RS:0;df2f15951535:46791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:36:32,004 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5a377456a28ce5815f6ef08b53b3c4e8, disabling compactions & flushes 2024-11-23T06:36:32,004 INFO [RS:0;df2f15951535:46791 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T06:36:32,004 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 2024-11-23T06:36:32,004 INFO [RS:0;df2f15951535:46791 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T06:36:32,004 INFO [RS:0;df2f15951535:46791 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T06:36:32,004 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 2024-11-23T06:36:32,004 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. after waiting 0 ms 2024-11-23T06:36:32,004 INFO [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T06:36:32,004 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 
2024-11-23T06:36:32,004 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 5a377456a28ce5815f6ef08b53b3c4e8 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-23T06:36:32,004 INFO [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T06:36:32,004 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:36:32,004 DEBUG [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 5a377456a28ce5815f6ef08b53b3c4e8=TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.} 2024-11-23T06:36:32,004 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:36:32,004 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:36:32,005 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:36:32,005 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:36:32,005 DEBUG [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5a377456a28ce5815f6ef08b53b3c4e8 2024-11-23T06:36:32,005 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-23T06:36:32,010 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/7102a4dbb8194867a98b5a5c100dde78 is 1080, key is row0029/info:/1732343785931/Put/seqid=0 2024-11-23T06:36:32,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741849_1025 (size=8193) 2024-11-23T06:36:32,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741849_1025 (size=8193) 2024-11-23T06:36:32,018 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/7102a4dbb8194867a98b5a5c100dde78 2024-11-23T06:36:32,027 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/.tmp/info/7102a4dbb8194867a98b5a5c100dde78 as 
hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/7102a4dbb8194867a98b5a5c100dde78 2024-11-23T06:36:32,032 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/.tmp/info/22545b0fee5f41f7aa4148744370f0f6 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8./info:regioninfo/1732343704808/Put/seqid=0 2024-11-23T06:36:32,036 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/7102a4dbb8194867a98b5a5c100dde78, entries=3, sequenceid=48, filesize=8.0 K 2024-11-23T06:36:32,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741850_1026 (size=7016) 2024-11-23T06:36:32,038 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 5a377456a28ce5815f6ef08b53b3c4e8 in 33ms, sequenceid=48, compaction requested=true 2024-11-23T06:36:32,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741850_1026 (size=7016) 2024-11-23T06:36:32,039 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/.tmp/info/22545b0fee5f41f7aa4148744370f0f6 2024-11-23T06:36:32,041 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/1c99a89bcd90452d9e334d09695d36b7, hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/5f17495d6cf9420f8e5d5e7bd125d385, hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/7798b1ce63a8465fa22f87f10c51d32b] to archive 2024-11-23T06:36:32,045 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T06:36:32,048 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/1c99a89bcd90452d9e334d09695d36b7 to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/archive/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/1c99a89bcd90452d9e334d09695d36b7 2024-11-23T06:36:32,050 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/5f17495d6cf9420f8e5d5e7bd125d385 to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/archive/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/5f17495d6cf9420f8e5d5e7bd125d385 2024-11-23T06:36:32,052 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/7798b1ce63a8465fa22f87f10c51d32b to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/archive/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/info/7798b1ce63a8465fa22f87f10c51d32b 2024-11-23T06:36:32,064 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/.tmp/ns/ce1613e1996d4edbb1c9b0043dc8a38d is 43, key is default/ns:d/1732343704071/Put/seqid=0 2024-11-23T06:36:32,062 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=df2f15951535:44363 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-23T06:36:32,066 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1c99a89bcd90452d9e334d09695d36b7=12509, 5f17495d6cf9420f8e5d5e7bd125d385=12509, 7798b1ce63a8465fa22f87f10c51d32b=12509] 2024-11-23T06:36:32,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741851_1027 (size=5153) 2024-11-23T06:36:32,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741851_1027 (size=5153) 2024-11-23T06:36:32,070 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/.tmp/ns/ce1613e1996d4edbb1c9b0043dc8a38d 2024-11-23T06:36:32,072 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/default/TestLogRolling-testSlowSyncLogRolling/5a377456a28ce5815f6ef08b53b3c4e8/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-23T06:36:32,075 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 2024-11-23T06:36:32,075 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5a377456a28ce5815f6ef08b53b3c4e8: Waiting for close lock at 1732343792003Running coprocessor pre-close hooks at 1732343792004 (+1 ms)Disabling compacts and flushes for region at 1732343792004Disabling writes for close at 1732343792004Obtaining lock to block concurrent updates at 1732343792004Preparing flush snapshotting stores in 5a377456a28ce5815f6ef08b53b3c4e8 at 1732343792004Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732343792005 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. at 1732343792006 (+1 ms)Flushing 5a377456a28ce5815f6ef08b53b3c4e8/info: creating writer at 1732343792006Flushing 5a377456a28ce5815f6ef08b53b3c4e8/info: appending metadata at 1732343792010 (+4 ms)Flushing 5a377456a28ce5815f6ef08b53b3c4e8/info: closing flushed file at 1732343792010Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ca6b474: reopening flushed file at 1732343792026 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 5a377456a28ce5815f6ef08b53b3c4e8 in 33ms, sequenceid=48, compaction requested=true at 1732343792038 (+12 ms)Writing region close event to WAL at 1732343792067 (+29 ms)Running coprocessor post-close hooks at 1732343792073 (+6 ms)Closed at 1732343792075 (+2 ms) 2024-11-23T06:36:32,076 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732343704306.5a377456a28ce5815f6ef08b53b3c4e8. 
2024-11-23T06:36:32,102 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/.tmp/table/c2bdf7cdfb6540a98bfd01f3725348bf is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732343704825/Put/seqid=0 2024-11-23T06:36:32,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741852_1028 (size=5396) 2024-11-23T06:36:32,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741852_1028 (size=5396) 2024-11-23T06:36:32,109 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/.tmp/table/c2bdf7cdfb6540a98bfd01f3725348bf 2024-11-23T06:36:32,119 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/.tmp/info/22545b0fee5f41f7aa4148744370f0f6 as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/info/22545b0fee5f41f7aa4148744370f0f6 2024-11-23T06:36:32,128 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/info/22545b0fee5f41f7aa4148744370f0f6, entries=10, sequenceid=11, filesize=6.9 K 2024-11-23T06:36:32,129 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/.tmp/ns/ce1613e1996d4edbb1c9b0043dc8a38d as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/ns/ce1613e1996d4edbb1c9b0043dc8a38d 2024-11-23T06:36:32,138 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/ns/ce1613e1996d4edbb1c9b0043dc8a38d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-23T06:36:32,140 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/.tmp/table/c2bdf7cdfb6540a98bfd01f3725348bf as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/table/c2bdf7cdfb6540a98bfd01f3725348bf 2024-11-23T06:36:32,148 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/table/c2bdf7cdfb6540a98bfd01f3725348bf, entries=2, sequenceid=11, filesize=5.3 K 2024-11-23T06:36:32,150 INFO 
[RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false 2024-11-23T06:36:32,156 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-23T06:36:32,156 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:36:32,156 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:36:32,157 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343792004Running coprocessor pre-close hooks at 1732343792004Disabling compacts and flushes for region at 1732343792004Disabling writes for close at 1732343792005 (+1 ms)Obtaining lock to block concurrent updates at 1732343792005Preparing flush snapshotting stores in 1588230740 at 1732343792005Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732343792006 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732343792006Flushing 1588230740/info: creating writer at 1732343792007 (+1 ms)Flushing 1588230740/info: appending metadata at 1732343792031 (+24 ms)Flushing 1588230740/info: closing flushed file at 1732343792031Flushing 1588230740/ns: creating writer at 1732343792048 (+17 ms)Flushing 1588230740/ns: appending metadata at 1732343792063 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732343792063Flushing 1588230740/table: creating writer at 1732343792080 (+17 ms)Flushing 1588230740/table: appending metadata at 1732343792101 (+21 ms)Flushing 1588230740/table: closing flushed file at 1732343792101Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12ca7b34: reopening flushed file at 1732343792117 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e2918ee: reopening flushed file at 1732343792128 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5155b8e7: reopening flushed file at 1732343792138 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=11, compaction requested=false at 1732343792150 (+12 ms)Writing region close event to WAL at 1732343792151 (+1 ms)Running coprocessor post-close hooks at 1732343792156 (+5 ms)Closed at 1732343792156 2024-11-23T06:36:32,157 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T06:36:32,205 INFO [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(976): stopping server df2f15951535,46791,1732343701363; all regions closed. 
2024-11-23T06:36:32,207 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,207 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,207 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,207 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,207 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741834_1010 (size=3066) 2024-11-23T06:36:32,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741834_1010 (size=3066) 2024-11-23T06:36:32,214 DEBUG [RS:0;df2f15951535:46791 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/oldWALs 2024-11-23T06:36:32,214 INFO [RS:0;df2f15951535:46791 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C46791%2C1732343701363.meta:.meta(num 1732343703877) 2024-11-23T06:36:32,215 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,215 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,215 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,215 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,215 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741846_1022 (size=13040) 2024-11-23T06:36:32,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741846_1022 (size=13040) 2024-11-23T06:36:32,225 DEBUG [RS:0;df2f15951535:46791 {}] wal.AbstractFSWAL(1256): Moved 5 WAL file(s) to /user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/oldWALs 2024-11-23T06:36:32,225 INFO [RS:0;df2f15951535:46791 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C46791%2C1732343701363:(num 1732343771896) 2024-11-23T06:36:32,225 DEBUG [RS:0;df2f15951535:46791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:36:32,225 INFO [RS:0;df2f15951535:46791 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:36:32,225 INFO [RS:0;df2f15951535:46791 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:36:32,225 INFO [RS:0;df2f15951535:46791 {}] hbase.ChoreService(370): Chore service for: regionserver/df2f15951535:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T06:36:32,225 INFO [RS:0;df2f15951535:46791 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:36:32,226 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T06:36:32,226 INFO [RS:0;df2f15951535:46791 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46791 2024-11-23T06:36:32,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/df2f15951535,46791,1732343701363 2024-11-23T06:36:32,273 INFO [RS:0;df2f15951535:46791 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:36:32,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:36:32,295 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [df2f15951535,46791,1732343701363] 2024-11-23T06:36:32,305 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/df2f15951535,46791,1732343701363 already deleted, retry=false 2024-11-23T06:36:32,305 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; df2f15951535,46791,1732343701363 expired; onlineServers=0 2024-11-23T06:36:32,305 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'df2f15951535,44363,1732343700498' ***** 2024-11-23T06:36:32,305 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T06:36:32,305 INFO [M:0;df2f15951535:44363 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:36:32,306 INFO [M:0;df2f15951535:44363 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:36:32,306 DEBUG [M:0;df2f15951535:44363 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T06:36:32,306 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-23T06:36:32,306 DEBUG [M:0;df2f15951535:44363 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T06:36:32,306 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343703139 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343703139,5,FailOnTimeoutGroup] 2024-11-23T06:36:32,306 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343703139 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343703139,5,FailOnTimeoutGroup] 2024-11-23T06:36:32,306 INFO [M:0;df2f15951535:44363 {}] hbase.ChoreService(370): Chore service for: master/df2f15951535:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T06:36:32,306 INFO [M:0;df2f15951535:44363 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:36:32,306 DEBUG [M:0;df2f15951535:44363 {}] master.HMaster(1795): Stopping service threads 2024-11-23T06:36:32,306 INFO [M:0;df2f15951535:44363 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T06:36:32,306 INFO [M:0;df2f15951535:44363 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:36:32,307 INFO [M:0;df2f15951535:44363 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T06:36:32,307 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T06:36:32,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T06:36:32,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:32,316 DEBUG [M:0;df2f15951535:44363 {}] zookeeper.ZKUtil(347): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T06:36:32,316 WARN [M:0;df2f15951535:44363 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T06:36:32,317 INFO [M:0;df2f15951535:44363 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/.lastflushedseqids 2024-11-23T06:36:32,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741853_1029 (size=130) 2024-11-23T06:36:32,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741853_1029 (size=130) 2024-11-23T06:36:32,331 INFO [M:0;df2f15951535:44363 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T06:36:32,331 INFO [M:0;df2f15951535:44363 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T06:36:32,331 DEBUG [M:0;df2f15951535:44363 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:36:32,331 INFO [M:0;df2f15951535:44363 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:32,331 DEBUG [M:0;df2f15951535:44363 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:32,331 DEBUG [M:0;df2f15951535:44363 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:36:32,331 DEBUG [M:0;df2f15951535:44363 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:32,332 INFO [M:0;df2f15951535:44363 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-11-23T06:36:32,349 DEBUG [M:0;df2f15951535:44363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3f016b82e65c4cbc923607c382aed6a2 is 82, key is hbase:meta,,1/info:regioninfo/1732343703961/Put/seqid=0 2024-11-23T06:36:32,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741854_1030 (size=5672) 2024-11-23T06:36:32,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741854_1030 (size=5672) 2024-11-23T06:36:32,356 INFO [M:0;df2f15951535:44363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3f016b82e65c4cbc923607c382aed6a2 2024-11-23T06:36:32,380 DEBUG [M:0;df2f15951535:44363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3db9af0fc0945aabad66d985d328064 is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732343704833/Put/seqid=0 2024-11-23T06:36:32,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741855_1031 (size=6246) 2024-11-23T06:36:32,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741855_1031 (size=6246) 2024-11-23T06:36:32,386 INFO [M:0;df2f15951535:44363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3db9af0fc0945aabad66d985d328064 2024-11-23T06:36:32,392 INFO [M:0;df2f15951535:44363 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f3db9af0fc0945aabad66d985d328064 2024-11-23T06:36:32,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:36:32,395 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46791-0x101666714980001, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:36:32,395 INFO [RS:0;df2f15951535:46791 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:36:32,396 INFO [RS:0;df2f15951535:46791 {}] regionserver.HRegionServer(1031): Exiting; stopping=df2f15951535,46791,1732343701363; zookeeper connection closed. 2024-11-23T06:36:32,396 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4f7b8650 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4f7b8650 2024-11-23T06:36:32,397 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T06:36:32,410 DEBUG [M:0;df2f15951535:44363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9eb35faf3a974b8181556e7a6c2badd2 is 69, key is df2f15951535,46791,1732343701363/rs:state/1732343703241/Put/seqid=0 2024-11-23T06:36:32,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741856_1032 (size=5156) 2024-11-23T06:36:32,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741856_1032 (size=5156) 2024-11-23T06:36:32,417 INFO [M:0;df2f15951535:44363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9eb35faf3a974b8181556e7a6c2badd2 2024-11-23T06:36:32,438 DEBUG [M:0;df2f15951535:44363 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/163ccff88d8b4d50a4591dccb661bf87 is 52, key is load_balancer_on/state:d/1732343704282/Put/seqid=0 2024-11-23T06:36:32,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741857_1033 (size=5056) 2024-11-23T06:36:32,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741857_1033 (size=5056) 2024-11-23T06:36:32,445 INFO [M:0;df2f15951535:44363 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/163ccff88d8b4d50a4591dccb661bf87 2024-11-23T06:36:32,452 DEBUG [M:0;df2f15951535:44363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3f016b82e65c4cbc923607c382aed6a2 as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3f016b82e65c4cbc923607c382aed6a2 2024-11-23T06:36:32,459 INFO [M:0;df2f15951535:44363 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3f016b82e65c4cbc923607c382aed6a2, entries=8, sequenceid=59, filesize=5.5 K 2024-11-23T06:36:32,461 DEBUG [M:0;df2f15951535:44363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3db9af0fc0945aabad66d985d328064 as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f3db9af0fc0945aabad66d985d328064 2024-11-23T06:36:32,467 INFO [M:0;df2f15951535:44363 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f3db9af0fc0945aabad66d985d328064 2024-11-23T06:36:32,468 INFO [M:0;df2f15951535:44363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f3db9af0fc0945aabad66d985d328064, entries=6, sequenceid=59, filesize=6.1 K 2024-11-23T06:36:32,469 DEBUG [M:0;df2f15951535:44363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9eb35faf3a974b8181556e7a6c2badd2 as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9eb35faf3a974b8181556e7a6c2badd2 2024-11-23T06:36:32,475 INFO [M:0;df2f15951535:44363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9eb35faf3a974b8181556e7a6c2badd2, entries=1, sequenceid=59, filesize=5.0 K 2024-11-23T06:36:32,476 DEBUG [M:0;df2f15951535:44363 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/163ccff88d8b4d50a4591dccb661bf87 as hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/163ccff88d8b4d50a4591dccb661bf87 2024-11-23T06:36:32,483 INFO [M:0;df2f15951535:44363 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/163ccff88d8b4d50a4591dccb661bf87, entries=1, sequenceid=59, filesize=4.9 K 2024-11-23T06:36:32,484 INFO [M:0;df2f15951535:44363 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=59, compaction requested=false 2024-11-23T06:36:32,486 INFO [M:0;df2f15951535:44363 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T06:36:32,486 DEBUG [M:0;df2f15951535:44363 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343792331Disabling compacts and flushes for region at 1732343792331Disabling writes for close at 1732343792331Obtaining lock to block concurrent updates at 1732343792332 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732343792332Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1732343792332Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732343792333 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732343792333Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732343792348 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732343792348Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732343792363 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732343792379 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732343792380 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732343792392 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732343792409 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732343792410 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732343792423 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732343792437 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732343792438 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20f292f6: reopening flushed file at 1732343792451 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dc904e8: reopening flushed file at 1732343792459 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@678b0b98: reopening flushed file at 1732343792468 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ab2b5f4: reopening flushed file at 1732343792475 (+7 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=59, compaction requested=false at 1732343792484 (+9 ms)Writing region close event to WAL at 1732343792486 (+2 ms)Closed at 1732343792486 2024-11-23T06:36:32,487 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,487 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,487 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,487 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,488 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:32,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32975 is added to blk_1073741830_1006 (size=27961) 2024-11-23T06:36:32,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46207 is added to blk_1073741830_1006 (size=27961) 2024-11-23T06:36:32,491 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T06:36:32,491 INFO [M:0;df2f15951535:44363 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T06:36:32,491 INFO [M:0;df2f15951535:44363 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44363 2024-11-23T06:36:32,491 INFO [M:0;df2f15951535:44363 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:36:32,605 INFO [M:0;df2f15951535:44363 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:36:32,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:36:32,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44363-0x101666714980000, quorum=127.0.0.1:51410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:36:32,612 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:32,616 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:36:32,616 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:36:32,617 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:36:32,617 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/hadoop.log.dir/,STOPPED} 2024-11-23T06:36:32,623 WARN [BP-1989195401-172.17.0.3-1732343696509 heartbeating to localhost/127.0.0.1:43385 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:36:32,623 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:36:32,623 WARN [BP-1989195401-172.17.0.3-1732343696509 heartbeating to localhost/127.0.0.1:43385 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1989195401-172.17.0.3-1732343696509 (Datanode Uuid 5cb5e92e-71c0-46f3-9564-3af36a225197) service to localhost/127.0.0.1:43385 2024-11-23T06:36:32,623 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:36:32,625 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/data/data3/current/BP-1989195401-172.17.0.3-1732343696509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:32,626 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/data/data4/current/BP-1989195401-172.17.0.3-1732343696509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:32,627 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:36:32,629 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:32,630 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:36:32,630 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:36:32,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:36:32,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/hadoop.log.dir/,STOPPED} 2024-11-23T06:36:32,632 WARN [BP-1989195401-172.17.0.3-1732343696509 heartbeating to localhost/127.0.0.1:43385 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:36:32,632 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:36:32,632 WARN [BP-1989195401-172.17.0.3-1732343696509 heartbeating to localhost/127.0.0.1:43385 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1989195401-172.17.0.3-1732343696509 (Datanode Uuid f2d12bfb-3015-4b89-9b39-ca97c3559cb6) service to localhost/127.0.0.1:43385 2024-11-23T06:36:32,632 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:36:32,633 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/data/data1/current/BP-1989195401-172.17.0.3-1732343696509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:32,633 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/cluster_ce58c3b4-4f6e-9669-e8be-e90229abc401/data/data2/current/BP-1989195401-172.17.0.3-1732343696509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:32,634 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:36:32,642 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:36:32,643 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:36:32,643 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:36:32,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:36:32,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/hadoop.log.dir/,STOPPED} 2024-11-23T06:36:32,655 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T06:36:32,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T06:36:32,693 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43385 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/df2f15951535:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43385 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/df2f15951535:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43385 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43385 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@5829c7be java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:43385 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43385 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43385 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/df2f15951535:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43385 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/df2f15951535:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=287 (was 248) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8378 (was 8412) 2024-11-23T06:36:32,699 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=287, ProcessCount=11, AvailableMemoryMB=8378 2024-11-23T06:36:32,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T06:36:32,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/hadoop.log.dir so I do NOT create it in target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7 2024-11-23T06:36:32,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27a1ab6b-37c3-7b9a-2d5b-166e5262b1a9/hadoop.tmp.dir so I do NOT create it in target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7 2024-11-23T06:36:32,700 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f, deleteOnExit=true 2024-11-23T06:36:32,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T06:36:32,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/test.cache.data in system properties and HBase conf 2024-11-23T06:36:32,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T06:36:32,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/hadoop.log.dir in system properties and HBase conf 2024-11-23T06:36:32,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T06:36:32,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T06:36:32,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T06:36:32,701 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-23T06:36:32,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:36:32,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:36:32,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T06:36:32,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:36:32,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T06:36:32,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T06:36:32,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:36:32,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:36:32,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T06:36:32,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/nfs.dump.dir in system properties and HBase conf 2024-11-23T06:36:32,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/java.io.tmpdir in system properties and HBase conf 2024-11-23T06:36:32,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:36:32,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T06:36:32,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T06:36:32,717 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:36:33,075 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:33,081 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:36:33,082 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:36:33,082 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:36:33,083 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:36:33,083 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:33,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@641eaf99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:36:33,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a15ed6a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:36:33,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ce0de36{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/java.io.tmpdir/jetty-localhost-43955-hadoop-hdfs-3_4_1-tests_jar-_-any-8300721564054521510/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:36:33,181 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3614f662{HTTP/1.1, (http/1.1)}{localhost:43955} 2024-11-23T06:36:33,181 INFO [Time-limited test {}] server.Server(415): Started @99178ms 2024-11-23T06:36:33,193 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:36:33,325 INFO [regionserver/df2f15951535:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:36:33,477 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:33,481 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:36:33,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:36:33,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:36:33,482 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:36:33,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa9c156{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:36:33,483 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e7873b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:36:33,578 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2cd60cfb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/java.io.tmpdir/jetty-localhost-39365-hadoop-hdfs-3_4_1-tests_jar-_-any-11575777173319313108/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:33,579 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f88e14b{HTTP/1.1, (http/1.1)}{localhost:39365} 2024-11-23T06:36:33,579 INFO [Time-limited test {}] server.Server(415): Started @99575ms 2024-11-23T06:36:33,580 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:36:33,616 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:33,619 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:36:33,621 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:36:33,621 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:36:33,621 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:36:33,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d5e070a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:36:33,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@573af0f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:36:33,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b5e52bc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/java.io.tmpdir/jetty-localhost-46271-hadoop-hdfs-3_4_1-tests_jar-_-any-314348705624532146/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:33,715 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@882842c{HTTP/1.1, (http/1.1)}{localhost:46271} 2024-11-23T06:36:33,715 INFO [Time-limited test {}] server.Server(415): Started @99712ms 2024-11-23T06:36:33,717 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:36:35,016 WARN [Thread-447 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/data/data2/current/BP-358645030-172.17.0.3-1732343792728/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:35,016 WARN [Thread-446 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/data/data1/current/BP-358645030-172.17.0.3-1732343792728/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:35,034 WARN [Thread-410 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T06:36:35,037 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf5b9eeef51f590a with lease ID 0xc6cb1fbddee6fd02: Processing first storage report for DS-b9ee7991-067b-4660-9009-32c5bacc2a77 from datanode DatanodeRegistration(127.0.0.1:41149, datanodeUuid=d9b0e659-fcaf-4b11-a00e-11e31a2fabab, infoPort=41411, infoSecurePort=0, ipcPort=40555, storageInfo=lv=-57;cid=testClusterID;nsid=1790039881;c=1732343792728) 2024-11-23T06:36:35,037 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf5b9eeef51f590a with lease ID 0xc6cb1fbddee6fd02: from storage DS-b9ee7991-067b-4660-9009-32c5bacc2a77 node DatanodeRegistration(127.0.0.1:41149, datanodeUuid=d9b0e659-fcaf-4b11-a00e-11e31a2fabab, infoPort=41411, infoSecurePort=0, ipcPort=40555, storageInfo=lv=-57;cid=testClusterID;nsid=1790039881;c=1732343792728), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:35,037 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbf5b9eeef51f590a with lease ID 0xc6cb1fbddee6fd02: Processing first storage report for DS-e19cb784-5315-47b1-9b89-ca8fa01d583b from datanode DatanodeRegistration(127.0.0.1:41149, datanodeUuid=d9b0e659-fcaf-4b11-a00e-11e31a2fabab, infoPort=41411, infoSecurePort=0, ipcPort=40555, storageInfo=lv=-57;cid=testClusterID;nsid=1790039881;c=1732343792728) 2024-11-23T06:36:35,037 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbf5b9eeef51f590a with lease ID 0xc6cb1fbddee6fd02: from storage DS-e19cb784-5315-47b1-9b89-ca8fa01d583b node DatanodeRegistration(127.0.0.1:41149, datanodeUuid=d9b0e659-fcaf-4b11-a00e-11e31a2fabab, infoPort=41411, infoSecurePort=0, ipcPort=40555, storageInfo=lv=-57;cid=testClusterID;nsid=1790039881;c=1732343792728), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:35,145 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/data/data3/current/BP-358645030-172.17.0.3-1732343792728/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:35,146 WARN [Thread-458 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/data/data4/current/BP-358645030-172.17.0.3-1732343792728/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:35,161 WARN [Thread-433 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T06:36:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a551bf0dfaadf45 with lease ID 0xc6cb1fbddee6fd03: Processing first storage report for DS-a98de05e-b58c-4ff0-a260-b988b6f76788 from datanode DatanodeRegistration(127.0.0.1:45925, datanodeUuid=90a1ee71-f90f-4f62-b3d1-c086b6600764, infoPort=38003, infoSecurePort=0, ipcPort=37711, storageInfo=lv=-57;cid=testClusterID;nsid=1790039881;c=1732343792728) 2024-11-23T06:36:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a551bf0dfaadf45 with lease ID 0xc6cb1fbddee6fd03: from storage DS-a98de05e-b58c-4ff0-a260-b988b6f76788 node DatanodeRegistration(127.0.0.1:45925, datanodeUuid=90a1ee71-f90f-4f62-b3d1-c086b6600764, infoPort=38003, infoSecurePort=0, ipcPort=37711, storageInfo=lv=-57;cid=testClusterID;nsid=1790039881;c=1732343792728), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T06:36:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a551bf0dfaadf45 with lease ID 0xc6cb1fbddee6fd03: Processing first storage report for DS-b62e7fc3-385a-495d-810d-2d0a709f428e from datanode DatanodeRegistration(127.0.0.1:45925, datanodeUuid=90a1ee71-f90f-4f62-b3d1-c086b6600764, infoPort=38003, infoSecurePort=0, ipcPort=37711, storageInfo=lv=-57;cid=testClusterID;nsid=1790039881;c=1732343792728) 2024-11-23T06:36:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a551bf0dfaadf45 with lease ID 0xc6cb1fbddee6fd03: from storage DS-b62e7fc3-385a-495d-810d-2d0a709f428e node DatanodeRegistration(127.0.0.1:45925, datanodeUuid=90a1ee71-f90f-4f62-b3d1-c086b6600764, infoPort=38003, infoSecurePort=0, ipcPort=37711, storageInfo=lv=-57;cid=testClusterID;nsid=1790039881;c=1732343792728), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:35,265 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7 2024-11-23T06:36:35,284 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/zookeeper_0, clientPort=62289, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T06:36:35,285 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62289 2024-11-23T06:36:35,286 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:35,288 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:35,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:36:35,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:36:35,302 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4 with version=8 2024-11-23T06:36:35,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/hbase-staging 2024-11-23T06:36:35,305 INFO [Time-limited test {}] client.ConnectionUtils(128): master/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:36:35,305 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:35,305 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:35,306 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:36:35,306 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:35,306 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:36:35,306 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T06:36:35,306 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:36:35,307 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37617 2024-11-23T06:36:35,309 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37617 connecting to ZooKeeper ensemble=127.0.0.1:62289 2024-11-23T06:36:35,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:376170x0, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:36:35,382 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37617-0x101666889fe0000 connected 2024-11-23T06:36:35,485 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:35,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:35,495 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:36:35,495 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4, hbase.cluster.distributed=false 2024-11-23T06:36:35,499 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:36:35,499 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37617 2024-11-23T06:36:35,499 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37617 2024-11-23T06:36:35,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37617 2024-11-23T06:36:35,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37617 2024-11-23T06:36:35,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37617 2024-11-23T06:36:35,519 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:36:35,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:35,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:35,519 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:36:35,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:35,519 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:36:35,519 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T06:36:35,519 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:36:35,520 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36413 2024-11-23T06:36:35,522 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36413 connecting to ZooKeeper ensemble=127.0.0.1:62289 2024-11-23T06:36:35,523 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:35,525 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:35,536 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364130x0, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:36:35,537 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:364130x0, quorum=127.0.0.1:62289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:36:35,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36413-0x101666889fe0001 connected 2024-11-23T06:36:35,537 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T06:36:35,540 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T06:36:35,541 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T06:36:35,542 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:36:35,543 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36413 2024-11-23T06:36:35,543 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36413 2024-11-23T06:36:35,544 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36413 2024-11-23T06:36:35,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36413 2024-11-23T06:36:35,548 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36413 2024-11-23T06:36:35,562 DEBUG [M:0;df2f15951535:37617 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;df2f15951535:37617 2024-11-23T06:36:35,563 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/df2f15951535,37617,1732343795305 2024-11-23T06:36:35,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:36:35,575 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:36:35,576 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/df2f15951535,37617,1732343795305 2024-11-23T06:36:35,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:35,589 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T06:36:35,589 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:35,589 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T06:36:35,590 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/df2f15951535,37617,1732343795305 from backup master directory 2024-11-23T06:36:35,599 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:36:35,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/df2f15951535,37617,1732343795305 2024-11-23T06:36:35,599 WARN [master/df2f15951535:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
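The entries above show the master publishing its ephemeral znodes (first under /hbase/backup-masters, then /hbase/master) while the region server's ZKWatcher observes the matching NodeCreated/NodeChildrenChanged events. As a minimal illustrative sketch only, assuming the mini ZooKeeper ensemble on the client port reported in this log (62289), an external check for the active-master znode could look like this with the plain ZooKeeper client:

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class CheckMasterZNode {
        public static void main(String[] args) throws Exception {
            // Connect to the MiniZooKeeperCluster on the client port reported in the log.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:62289", 30000, event -> { });
            try {
                // The active master registers an ephemeral znode at /hbase/master under the base znode.
                Stat stat = zk.exists("/hbase/master", false);
                System.out.println(stat != null ? "active master znode present" : "no active master yet");
            } finally {
                zk.close();
            }
        }
    }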
2024-11-23T06:36:35,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:36:35,600 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=df2f15951535,37617,1732343795305 2024-11-23T06:36:35,605 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/hbase.id] with ID: 54f36218-7e7b-4030-b287-3d968656b426 2024-11-23T06:36:35,605 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/.tmp/hbase.id 2024-11-23T06:36:35,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:36:35,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:36:35,615 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/.tmp/hbase.id]:[hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/hbase.id] 2024-11-23T06:36:35,632 INFO [master/df2f15951535:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:35,632 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T06:36:35,634 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
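The cluster ID bootstrap recorded above writes hbase.id to a .tmp location first and only then moves it to its final path, so a reader never observes a half-written ID file. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API, assuming the namenode address from the log and with the long test-data paths abbreviated for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteClusterId {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:39029"); // namenode address from the log
            FileSystem fs = FileSystem.get(conf);

            Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // abbreviated, illustrative paths
            Path dst = new Path("/user/jenkins/test-data/hbase.id");

            // Write the ID to the temporary file first...
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.writeUTF("54f36218-7e7b-4030-b287-3d968656b426"); // cluster ID as logged
            }
            // ...then move it to its final location with a single rename.
            fs.rename(tmp, dst);
            fs.close();
        }
    }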
2024-11-23T06:36:35,641 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:35,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:35,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:36:35,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:36:35,650 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:36:35,651 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T06:36:35,651 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:36:35,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:36:35,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:36:35,662 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store 2024-11-23T06:36:35,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:36:35,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:36:35,670 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:36:35,671 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:36:35,671 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:35,671 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:35,671 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:36:35,671 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:35,671 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
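The master:store descriptor dumped above spells out each column family's attributes; for the 'info' family that means 3 versions, ROW_INDEX_V1 data block encoding, a ROWCOL bloom filter, in-memory caching and 8 KB blocks. As an illustrative sketch (not the code path the master itself runs), an equivalent family can be declared with the public HBase descriptor builders:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoFamilyDescriptor {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes printed in the master:store descriptor above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
            System.out.println(info);
        }
    }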
2024-11-23T06:36:35,671 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343795670Disabling compacts and flushes for region at 1732343795670Disabling writes for close at 1732343795671 (+1 ms)Writing region close event to WAL at 1732343795671Closed at 1732343795671 2024-11-23T06:36:35,672 WARN [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/.initializing 2024-11-23T06:36:35,672 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/WALs/df2f15951535,37617,1732343795305 2024-11-23T06:36:35,675 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C37617%2C1732343795305, suffix=, logDir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/WALs/df2f15951535,37617,1732343795305, archiveDir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/oldWALs, maxLogs=10 2024-11-23T06:36:35,676 INFO [master/df2f15951535:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C37617%2C1732343795305.1732343795676 2024-11-23T06:36:35,681 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/WALs/df2f15951535,37617,1732343795305/df2f15951535%2C37617%2C1732343795305.1732343795676 2024-11-23T06:36:35,682 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38003:38003),(127.0.0.1/127.0.0.1:41411:41411)] 2024-11-23T06:36:35,683 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:36:35,683 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:36:35,683 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,683 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T06:36:35,686 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:35,687 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:35,687 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,688 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T06:36:35,688 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:35,689 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:36:35,689 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,691 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T06:36:35,692 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:35,692 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:36:35,692 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T06:36:35,694 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:35,695 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:36:35,695 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,696 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,696 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,698 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,698 DEBUG [master/df2f15951535:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,698 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T06:36:35,700 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:35,702 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:36:35,703 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=790568, jitterRate=0.0052592456340789795}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T06:36:35,703 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732343795683Initializing all the Stores at 1732343795684 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343795684Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343795684Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343795684Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343795685 (+1 ms)Cleaning up temporary data from old regions at 1732343795698 (+13 ms)Region opened successfully at 1732343795703 (+5 ms) 2024-11-23T06:36:35,704 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T06:36:35,708 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11f8b732, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:36:35,709 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T06:36:35,709 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T06:36:35,709 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T06:36:35,709 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T06:36:35,710 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T06:36:35,710 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T06:36:35,710 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T06:36:35,714 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T06:36:35,715 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T06:36:35,726 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T06:36:35,726 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T06:36:35,727 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T06:36:35,736 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T06:36:35,737 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T06:36:35,738 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T06:36:35,746 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T06:36:35,748 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T06:36:35,757 DEBUG 
[master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T06:36:35,760 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T06:36:35,767 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T06:36:35,778 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:36:35,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:36:35,778 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:35,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:35,779 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=df2f15951535,37617,1732343795305, sessionid=0x101666889fe0000, setting cluster-up flag (Was=false) 2024-11-23T06:36:35,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:35,800 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:35,831 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T06:36:35,834 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,37617,1732343795305 2024-11-23T06:36:35,863 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:35,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:35,894 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T06:36:35,898 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,37617,1732343795305 2024-11-23T06:36:35,901 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T06:36:35,906 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T06:36:35,907 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T06:36:35,907 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T06:36:35,907 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: df2f15951535,37617,1732343795305 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T06:36:35,910 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:36:35,910 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:36:35,910 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:36:35,910 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:36:35,910 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/df2f15951535:0, corePoolSize=10, maxPoolSize=10 2024-11-23T06:36:35,910 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:35,910 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:36:35,911 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/df2f15951535:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T06:36:35,912 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732343825911 2024-11-23T06:36:35,912 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T06:36:35,912 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T06:36:35,912 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T06:36:35,912 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T06:36:35,912 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T06:36:35,912 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T06:36:35,912 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:35,913 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T06:36:35,913 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:36:35,913 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T06:36:35,913 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T06:36:35,913 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T06:36:35,913 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T06:36:35,913 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T06:36:35,914 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343795914,5,FailOnTimeoutGroup] 2024-11-23T06:36:35,914 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343795914,5,FailOnTimeoutGroup] 2024-11-23T06:36:35,914 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:35,914 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T06:36:35,914 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:35,914 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:35,914 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:35,915 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T06:36:35,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:36:35,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:36:35,925 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T06:36:35,925 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4 2024-11-23T06:36:35,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:36:35,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:36:35,937 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:36:35,939 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:36:35,940 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:36:35,941 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:35,941 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:35,941 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:36:35,943 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:36:35,943 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:35,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:35,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:36:35,945 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:36:35,945 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:35,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:35,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:36:35,948 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:36:35,948 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:35,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:35,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:36:35,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740 2024-11-23T06:36:35,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740 2024-11-23T06:36:35,950 INFO [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(746): ClusterId : 54f36218-7e7b-4030-b287-3d968656b426 2024-11-23T06:36:35,950 DEBUG [RS:0;df2f15951535:36413 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T06:36:35,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:36:35,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:36:35,952 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
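The FSTableDescriptors and HRegion lines above pin down the column-family layout the master writes for hbase:meta: ROWCOL bloom filters, in-memory stores, ROW_INDEX_V1 block encoding, 8 KB blocks for info/ns/table and 64 KB for rep_barrier, plus the MultiRowMutationEndpoint coprocessor. Purely as an illustrative sketch (the table name "demo", class name, and method are invented; hbase:meta itself is created internally by the master, not through this path), a descriptor with a similar family could be assembled via the public client API:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static TableDescriptor sketch() throws IOException {
        // Mirrors the 'info' family settings seen in the log: 3 versions,
        // ROWCOL bloom filter, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();
        // "demo" is a hypothetical table name used only for this sketch.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(info)
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }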
2024-11-23T06:36:35,953 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:36:35,955 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:36:35,956 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=837120, jitterRate=0.06445325911045074}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:36:35,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732343795937Initializing all the Stores at 1732343795938 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343795938Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343795938Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343795938Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343795939 (+1 ms)Cleaning up temporary data from old regions at 1732343795951 (+12 ms)Region opened successfully at 1732343795957 (+6 ms) 2024-11-23T06:36:35,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:36:35,957 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:36:35,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:36:35,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:36:35,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:36:35,958 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:36:35,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343795957Disabling compacts and flushes for region at 1732343795957Disabling writes for close at 1732343795957Writing 
region close event to WAL at 1732343795958 (+1 ms)Closed at 1732343795958 2024-11-23T06:36:35,958 DEBUG [RS:0;df2f15951535:36413 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T06:36:35,958 DEBUG [RS:0;df2f15951535:36413 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T06:36:35,959 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:36:35,959 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T06:36:35,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T06:36:35,961 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:36:35,962 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T06:36:35,969 DEBUG [RS:0;df2f15951535:36413 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T06:36:35,969 DEBUG [RS:0;df2f15951535:36413 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2873c505, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:36:35,982 DEBUG [RS:0;df2f15951535:36413 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;df2f15951535:36413 2024-11-23T06:36:35,982 INFO [RS:0;df2f15951535:36413 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T06:36:35,982 INFO [RS:0;df2f15951535:36413 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T06:36:35,982 DEBUG [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T06:36:35,983 INFO [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(2659): reportForDuty to master=df2f15951535,37617,1732343795305 with port=36413, startcode=1732343795518 2024-11-23T06:36:35,984 DEBUG [RS:0;df2f15951535:36413 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T06:36:35,987 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40853, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T06:36:35,988 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37617 {}] master.ServerManager(363): Checking decommissioned status of RegionServer df2f15951535,36413,1732343795518 2024-11-23T06:36:35,988 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37617 {}] master.ServerManager(517): Registering regionserver=df2f15951535,36413,1732343795518 2024-11-23T06:36:35,990 DEBUG [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4 2024-11-23T06:36:35,990 DEBUG [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39029 2024-11-23T06:36:35,991 DEBUG [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T06:36:35,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:36:36,000 DEBUG [RS:0;df2f15951535:36413 {}] zookeeper.ZKUtil(111): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/df2f15951535,36413,1732343795518 2024-11-23T06:36:36,000 WARN [RS:0;df2f15951535:36413 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T06:36:36,000 INFO [RS:0;df2f15951535:36413 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:36:36,000 DEBUG [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/WALs/df2f15951535,36413,1732343795518 2024-11-23T06:36:36,000 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [df2f15951535,36413,1732343795518] 2024-11-23T06:36:36,005 INFO [RS:0;df2f15951535:36413 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T06:36:36,008 INFO [RS:0;df2f15951535:36413 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T06:36:36,009 INFO [RS:0;df2f15951535:36413 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T06:36:36,009 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T06:36:36,009 INFO [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T06:36:36,011 INFO [RS:0;df2f15951535:36413 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T06:36:36,011 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,011 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,011 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,011 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,011 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,012 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,012 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:36:36,012 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,012 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,012 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,012 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,012 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,012 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:36,013 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:36:36,013 DEBUG [RS:0;df2f15951535:36413 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:36:36,013 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
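Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." line above is the ChoreService acknowledging a periodic background task registered by the region server. As a loose sketch of that internal pattern only (ScheduledChore and ChoreService are server-internal classes, and the chore name, period, and body here are invented), a chore is a small subclass scheduled on a ChoreService:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Trivial stop flag; real servers pass themselves as the Stoppable.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // Hypothetical chore: prints a heartbeat on a 60 s period (milliseconds).
        service.scheduleChore(new ScheduledChore("HeartbeatChore", stopper, 60_000) {
          @Override protected void chore() {
            System.out.println("heartbeat " + System.currentTimeMillis());
          }
        });
        Thread.sleep(5_000);
        service.shutdown();
      }
    }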
2024-11-23T06:36:36,013 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,013 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,013 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,013 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,013 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,36413,1732343795518-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:36:36,029 INFO [RS:0;df2f15951535:36413 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T06:36:36,029 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,36413,1732343795518-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,029 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,030 INFO [RS:0;df2f15951535:36413 {}] regionserver.Replication(171): df2f15951535,36413,1732343795518 started 2024-11-23T06:36:36,043 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,044 INFO [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(1482): Serving as df2f15951535,36413,1732343795518, RpcServer on df2f15951535/172.17.0.3:36413, sessionid=0x101666889fe0001 2024-11-23T06:36:36,044 DEBUG [RS:0;df2f15951535:36413 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T06:36:36,044 DEBUG [RS:0;df2f15951535:36413 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager df2f15951535,36413,1732343795518 2024-11-23T06:36:36,044 DEBUG [RS:0;df2f15951535:36413 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,36413,1732343795518' 2024-11-23T06:36:36,044 DEBUG [RS:0;df2f15951535:36413 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T06:36:36,045 DEBUG [RS:0;df2f15951535:36413 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T06:36:36,045 DEBUG [RS:0;df2f15951535:36413 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T06:36:36,045 DEBUG [RS:0;df2f15951535:36413 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T06:36:36,045 DEBUG [RS:0;df2f15951535:36413 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager df2f15951535,36413,1732343795518 2024-11-23T06:36:36,045 DEBUG [RS:0;df2f15951535:36413 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,36413,1732343795518' 2024-11-23T06:36:36,045 DEBUG [RS:0;df2f15951535:36413 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T06:36:36,046 DEBUG 
[RS:0;df2f15951535:36413 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T06:36:36,047 DEBUG [RS:0;df2f15951535:36413 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T06:36:36,047 INFO [RS:0;df2f15951535:36413 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T06:36:36,047 INFO [RS:0;df2f15951535:36413 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T06:36:36,113 WARN [df2f15951535:37617 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-23T06:36:36,152 INFO [RS:0;df2f15951535:36413 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C36413%2C1732343795518, suffix=, logDir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/WALs/df2f15951535,36413,1732343795518, archiveDir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/oldWALs, maxLogs=32 2024-11-23T06:36:36,156 INFO [RS:0;df2f15951535:36413 {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C36413%2C1732343795518.1732343796156 2024-11-23T06:36:36,163 INFO [RS:0;df2f15951535:36413 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/WALs/df2f15951535,36413,1732343795518/df2f15951535%2C36413%2C1732343795518.1732343796156 2024-11-23T06:36:36,166 DEBUG [RS:0;df2f15951535:36413 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41411:41411),(127.0.0.1/127.0.0.1:38003:38003)] 2024-11-23T06:36:36,363 DEBUG [df2f15951535:37617 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T06:36:36,364 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=df2f15951535,36413,1732343795518 2024-11-23T06:36:36,366 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,36413,1732343795518, state=OPENING 2024-11-23T06:36:36,418 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T06:36:36,431 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:36,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:36,433 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:36:36,434 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,36413,1732343795518}] 2024-11-23T06:36:36,434 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-23T06:36:36,434 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:36:36,592 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T06:36:36,598 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58529, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T06:36:36,604 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T06:36:36,604 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:36:36,607 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C36413%2C1732343795518.meta, suffix=.meta, logDir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/WALs/df2f15951535,36413,1732343795518, archiveDir=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/oldWALs, maxLogs=32 2024-11-23T06:36:36,610 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C36413%2C1732343795518.meta.1732343796610.meta 2024-11-23T06:36:36,616 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/WALs/df2f15951535,36413,1732343795518/df2f15951535%2C36413%2C1732343795518.meta.1732343796610.meta 2024-11-23T06:36:36,619 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41411:41411),(127.0.0.1/127.0.0.1:38003:38003)] 2024-11-23T06:36:36,620 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:36:36,620 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T06:36:36,620 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T06:36:36,620 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
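The wal.AbstractFSWAL(613) entries above report the effective WAL settings on this region server: FSHLogProvider, blocksize=256 MB, rollsize=128 MB, maxLogs=32. For orientation only, these correspond to a handful of site properties; the sketch below simply echoes the logged values back into a Configuration and is not a tuning recommendation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static Configuration walTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                     // FSHLogProvider, as logged
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);     // rollsize = 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                    // maxLogs=32
        return conf;
      }
    }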
2024-11-23T06:36:36,620 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T06:36:36,620 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:36:36,621 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T06:36:36,621 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T06:36:36,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:36:36,623 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:36:36,623 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:36,624 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:36,624 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:36:36,625 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:36:36,625 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:36,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:36,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:36:36,627 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:36:36,627 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:36,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:36,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:36:36,629 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:36:36,629 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:36,629 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
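The compactions.CompactionConfiguration(183) lines repeated for every store above spell out the compaction tuning in effect: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2 (5.0 off-peak), throttle point 2684354560 bytes, and weekly major compactions with 0.5 jitter. As a hedged illustration, the same numbers expressed through the usual site properties (values copied from the log, not a tuning suggestion):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration compactionTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // throttle point
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period, 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }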
2024-11-23T06:36:36,630 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:36:36,631 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740 2024-11-23T06:36:36,632 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740 2024-11-23T06:36:36,634 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:36:36,634 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:36:36,635 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T06:36:36,637 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:36:36,638 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735918, jitterRate=-0.064232736825943}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:36:36,638 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T06:36:36,640 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732343796621Writing region info on filesystem at 1732343796621Initializing all the Stores at 1732343796622 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343796622Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343796622Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343796622Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343796622Cleaning up temporary data from old regions at 1732343796634 (+12 ms)Running coprocessor post-open hooks at 1732343796638 (+4 ms)Region opened successfully at 1732343796639 (+1 ms) 2024-11-23T06:36:36,641 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732343796592 2024-11-23T06:36:36,644 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T06:36:36,644 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T06:36:36,645 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,36413,1732343795518 2024-11-23T06:36:36,646 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,36413,1732343795518, state=OPEN 2024-11-23T06:36:36,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:36:36,676 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:36:36,677 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=df2f15951535,36413,1732343795518 2024-11-23T06:36:36,677 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:36:36,677 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:36:36,682 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T06:36:36,683 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,36413,1732343795518 in 243 msec 2024-11-23T06:36:36,687 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T06:36:36,687 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 723 msec 2024-11-23T06:36:36,689 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:36:36,689 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T06:36:36,691 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:36:36,691 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,36413,1732343795518, seqNum=-1] 2024-11-23T06:36:36,692 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:36:36,694 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39785, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:36:36,703 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 798 msec 2024-11-23T06:36:36,703 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732343796703, completionTime=-1 2024-11-23T06:36:36,703 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T06:36:36,703 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T06:36:36,705 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T06:36:36,705 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732343856705 2024-11-23T06:36:36,706 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732343916706 2024-11-23T06:36:36,706 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-23T06:36:36,706 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,37617,1732343795305-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,706 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,37617,1732343795305-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,706 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,37617,1732343795305-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,707 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-df2f15951535:37617, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:36:36,707 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,707 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:36,709 DEBUG [master/df2f15951535:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T06:36:36,713 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.113sec 2024-11-23T06:36:36,714 INFO [master/df2f15951535:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T06:36:36,714 INFO [master/df2f15951535:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T06:36:36,714 INFO [master/df2f15951535:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T06:36:36,714 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T06:36:36,714 INFO [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T06:36:36,714 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,37617,1732343795305-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:36:36,714 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,37617,1732343795305-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T06:36:36,717 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T06:36:36,717 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T06:36:36,717 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,37617,1732343795305-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:36:36,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd69191, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:36:36,751 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request df2f15951535,37617,-1 for getting cluster id 2024-11-23T06:36:36,751 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T06:36:36,753 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '54f36218-7e7b-4030-b287-3d968656b426' 2024-11-23T06:36:36,754 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T06:36:36,754 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "54f36218-7e7b-4030-b287-3d968656b426" 2024-11-23T06:36:36,755 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@111b08da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:36:36,755 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [df2f15951535,37617,-1] 2024-11-23T06:36:36,755 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T06:36:36,755 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:36:36,757 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36896, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T06:36:36,758 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e299140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:36:36,758 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:36:36,760 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,36413,1732343795518, seqNum=-1] 2024-11-23T06:36:36,760 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:36:36,762 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36346, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:36:36,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=df2f15951535,37617,1732343795305 2024-11-23T06:36:36,765 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:36,768 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T06:36:36,768 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T06:36:36,768 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T06:36:36,768 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:36:36,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:36:36,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:36:36,768 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T06:36:36,769 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T06:36:36,769 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2023939679, stopped=false 2024-11-23T06:36:36,769 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=df2f15951535,37617,1732343795305 2024-11-23T06:36:36,789 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:36:36,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:36:36,789 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:36,789 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:36:36,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:36,789 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
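The call stacks above show where this output comes from: AbstractTestLogRolling.tearDown (invoked from TestLogRolling.testLogRollOnDatanodeDeath) calls HBaseTestingUtil.shutdownMiniCluster, which first closes the shared AsyncConnection and then asks the master to shut the cluster down. A loose sketch of that mini-cluster lifecycle, with an invented test class and an empty test body standing in for the real WAL-rolling logic:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    // Sketch only; the real AbstractTestLogRolling/TestLogRolling classes do far more setup.
    public class MiniClusterSketchTest {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        util.startMiniCluster();      // brings up ZK, HDFS, a master, and a region server
      }

      @Test
      public void smoke() throws Exception {
        // test body would go here, e.g. create a table and force WAL rolls
      }

      @After
      public void tearDown() throws Exception {
        util.shutdownMiniCluster();   // produces the "Shutting down minicluster" lines above
      }
    }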
2024-11-23T06:36:36,789 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-23T06:36:36,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-23T06:36:36,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-23T06:36:36,789 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T06:36:36,790 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'df2f15951535,36413,1732343795518' *****
2024-11-23T06:36:36,790 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-23T06:36:36,790 INFO [RS:0;df2f15951535:36413 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-23T06:36:36,790 INFO [RS:0;df2f15951535:36413 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-23T06:36:36,790 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-23T06:36:36,790 INFO [RS:0;df2f15951535:36413 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-23T06:36:36,790 INFO [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(959): stopping server df2f15951535,36413,1732343795518
2024-11-23T06:36:36,790 INFO [RS:0;df2f15951535:36413 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-23T06:36:36,790 INFO [RS:0;df2f15951535:36413 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;df2f15951535:36413.
2024-11-23T06:36:36,790 DEBUG [RS:0;df2f15951535:36413 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-23T06:36:36,790 DEBUG [RS:0;df2f15951535:36413 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T06:36:36,791 INFO [RS:0;df2f15951535:36413 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
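[Editor's note] The "Connection has been closed by ..." and "Stopping rpc client" entries above are emitted from AsyncConnectionImpl.close(). The sketch below shows the caller-side equivalent using the public client API; it is a minimal example under assumptions, not code from this test, and the class name is hypothetical.

// Sketch: creating and closing an AsyncConnection, which triggers the DEBUG call-stack
// logging and the "Stopping rpc client" message seen in the log.
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncConnectionCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    CompletableFuture<AsyncConnection> future = ConnectionFactory.createAsyncConnection(conf);
    // try-with-resources calls close(), which logs the closing caller and its stack
    // at DEBUG level, then stops the underlying RPC client.
    try (AsyncConnection conn = future.get()) {
      // ... conn.getTable(...) and other client calls would go here ...
    }
  }
}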
2024-11-23T06:36:36,791 INFO [RS:0;df2f15951535:36413 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T06:36:36,791 INFO [RS:0;df2f15951535:36413 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T06:36:36,791 INFO [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T06:36:36,791 INFO [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T06:36:36,791 DEBUG [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-23T06:36:36,791 DEBUG [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-23T06:36:36,791 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:36:36,791 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:36:36,791 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:36:36,791 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:36:36,791 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:36:36,792 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-23T06:36:36,809 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740/.tmp/ns/dd4fc868c8fc4e9aa39e886a4556c6c2 is 43, key is default/ns:d/1732343796695/Put/seqid=0 2024-11-23T06:36:36,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741835_1011 (size=5153) 2024-11-23T06:36:36,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741835_1011 (size=5153) 2024-11-23T06:36:36,815 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740/.tmp/ns/dd4fc868c8fc4e9aa39e886a4556c6c2 2024-11-23T06:36:36,826 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740/.tmp/ns/dd4fc868c8fc4e9aa39e886a4556c6c2 as hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740/ns/dd4fc868c8fc4e9aa39e886a4556c6c2 2024-11-23T06:36:36,834 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740/ns/dd4fc868c8fc4e9aa39e886a4556c6c2, entries=2, sequenceid=6, filesize=5.0 K 2024-11-23T06:36:36,835 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false 2024-11-23T06:36:36,835 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T06:36:36,841 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T06:36:36,842 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:36:36,842 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:36:36,842 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343796791Running coprocessor pre-close hooks at 1732343796791Disabling compacts and flushes for region at 1732343796791Disabling writes for close at 1732343796791Obtaining lock to block concurrent updates at 1732343796792 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732343796792Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732343796792Flushing stores of hbase:meta,,1.1588230740 at 1732343796793 (+1 ms)Flushing 1588230740/ns: creating writer at 1732343796793Flushing 1588230740/ns: appending metadata at 1732343796808 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732343796808Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a594ce0: reopening flushed file at 1732343796824 (+16 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false at 1732343796835 (+11 ms)Writing region close event to WAL at 1732343796837 (+2 ms)Running coprocessor post-close hooks at 1732343796842 (+5 ms)Closed at 1732343796842 2024-11-23T06:36:36,842 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T06:36:36,991 INFO [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(976): stopping server df2f15951535,36413,1732343795518; all regions closed. 
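[Editor's note] The lines above record an automatic flush of hbase:meta (region 1588230740) during region close: the memstore (74 B, 2 cells) is written to a temporary HFile under .tmp/ns/, committed into the ns column family, and the close journal is logged. A test can exercise the same flush path explicitly through the Admin API; the sketch below is illustrative only, and TEST_UTIL is a hypothetical HBaseTestingUtil instance rather than code from this run.

// Sketch: forcing a flush of hbase:meta from a test, producing the same
// HFile-commit sequence that the close-time flush logs above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class MetaFlushSketch {
  static void forceMetaFlush(HBaseTestingUtil TEST_UTIL) throws Exception {
    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
      // Writes the current hbase:meta memstore to new HFiles under
      // .../data/hbase/meta/<encoded-region>/<family>/.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}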
2024-11-23T06:36:36,992 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:36,992 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:36,992 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:36,992 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:36,992 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:36,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741834_1010 (size=1152) 2024-11-23T06:36:36,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741834_1010 (size=1152) 2024-11-23T06:36:36,998 DEBUG [RS:0;df2f15951535:36413 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/oldWALs 2024-11-23T06:36:36,998 INFO [RS:0;df2f15951535:36413 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C36413%2C1732343795518.meta:.meta(num 1732343796610) 2024-11-23T06:36:37,000 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:37,000 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:37,000 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:37,000 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:37,000 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:37,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741833_1009 (size=93) 2024-11-23T06:36:37,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741833_1009 (size=93) 2024-11-23T06:36:37,013 INFO [regionserver/df2f15951535:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T06:36:37,014 INFO [regionserver/df2f15951535:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T06:36:37,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:36:37,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:36:37,409 DEBUG [RS:0;df2f15951535:36413 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/oldWALs 2024-11-23T06:36:37,410 INFO [RS:0;df2f15951535:36413 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C36413%2C1732343795518:(num 1732343796156) 2024-11-23T06:36:37,410 DEBUG [RS:0;df2f15951535:36413 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:36:37,410 INFO [RS:0;df2f15951535:36413 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:36:37,410 INFO [RS:0;df2f15951535:36413 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:36:37,411 INFO [RS:0;df2f15951535:36413 {}] hbase.ChoreService(370): Chore service for: regionserver/df2f15951535:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T06:36:37,411 INFO [RS:0;df2f15951535:36413 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:36:37,411 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T06:36:37,412 INFO [RS:0;df2f15951535:36413 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36413 2024-11-23T06:36:37,460 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/df2f15951535,36413,1732343795518 2024-11-23T06:36:37,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:36:37,460 INFO [RS:0;df2f15951535:36413 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:36:37,462 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [df2f15951535,36413,1732343795518] 2024-11-23T06:36:37,484 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/df2f15951535,36413,1732343795518 already deleted, retry=false 2024-11-23T06:36:37,484 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; df2f15951535,36413,1732343795518 expired; onlineServers=0 2024-11-23T06:36:37,484 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'df2f15951535,37617,1732343795305' ***** 2024-11-23T06:36:37,484 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T06:36:37,484 INFO [M:0;df2f15951535:37617 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:36:37,485 INFO [M:0;df2f15951535:37617 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:36:37,485 DEBUG [M:0;df2f15951535:37617 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T06:36:37,485 DEBUG [M:0;df2f15951535:37617 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T06:36:37,485 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
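[Editor's note] The "Moved 1 WAL file(s) to .../oldWALs" and "Closed WAL: FSHLog ..." entries above show the region server archiving its write-ahead logs at shutdown. Log-rolling tests typically drive the same archival while the server is still running by requesting a WAL roll. The sketch below is a hedged example under assumptions about the HBaseTestingUtil/SingleProcessHBaseCluster accessors (getMiniHBaseCluster, getRegionServer); it is not the actual TestLogRolling code.

// Sketch: asking a region server to roll its WAL writer, after which the previous
// WAL file is closed and later moved to <rootdir>/oldWALs.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public class WalRollSketch {
  static void rollWal(HBaseTestingUtil util) throws Exception {
    // Assumed lookup of the single region server started by the minicluster.
    ServerName rs = util.getMiniHBaseCluster().getRegionServer(0).getServerName();
    try (Admin admin = util.getConnection().getAdmin()) {
      admin.rollWALWriter(rs);
    }
  }
}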
2024-11-23T06:36:37,485 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343795914 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343795914,5,FailOnTimeoutGroup] 2024-11-23T06:36:37,485 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343795914 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343795914,5,FailOnTimeoutGroup] 2024-11-23T06:36:37,486 INFO [M:0;df2f15951535:37617 {}] hbase.ChoreService(370): Chore service for: master/df2f15951535:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T06:36:37,486 INFO [M:0;df2f15951535:37617 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:36:37,486 DEBUG [M:0;df2f15951535:37617 {}] master.HMaster(1795): Stopping service threads 2024-11-23T06:36:37,486 INFO [M:0;df2f15951535:37617 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T06:36:37,487 INFO [M:0;df2f15951535:37617 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:36:37,487 INFO [M:0;df2f15951535:37617 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T06:36:37,487 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T06:36:37,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T06:36:37,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:37,494 DEBUG [M:0;df2f15951535:37617 {}] zookeeper.ZKUtil(347): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T06:36:37,494 WARN [M:0;df2f15951535:37617 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T06:36:37,495 INFO [M:0;df2f15951535:37617 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/.lastflushedseqids 2024-11-23T06:36:37,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741836_1012 (size=99) 2024-11-23T06:36:37,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741836_1012 (size=99) 2024-11-23T06:36:37,501 INFO [M:0;df2f15951535:37617 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T06:36:37,501 INFO [M:0;df2f15951535:37617 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T06:36:37,502 DEBUG [M:0;df2f15951535:37617 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:36:37,502 INFO [M:0;df2f15951535:37617 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:37,502 DEBUG [M:0;df2f15951535:37617 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:37,502 DEBUG [M:0;df2f15951535:37617 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:36:37,502 DEBUG [M:0;df2f15951535:37617 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:37,502 INFO [M:0;df2f15951535:37617 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-23T06:36:37,518 DEBUG [M:0;df2f15951535:37617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14b62c37bb874ee1840383ccbb89d0b7 is 82, key is hbase:meta,,1/info:regioninfo/1732343796645/Put/seqid=0 2024-11-23T06:36:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741837_1013 (size=5672) 2024-11-23T06:36:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741837_1013 (size=5672) 2024-11-23T06:36:37,524 INFO [M:0;df2f15951535:37617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14b62c37bb874ee1840383ccbb89d0b7 2024-11-23T06:36:37,546 DEBUG [M:0;df2f15951535:37617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/22b28ab2b0b0417f81496c9599c18010 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732343796702/Put/seqid=0 2024-11-23T06:36:37,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741838_1014 (size=5275) 2024-11-23T06:36:37,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741838_1014 (size=5275) 2024-11-23T06:36:37,552 INFO [M:0;df2f15951535:37617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/22b28ab2b0b0417f81496c9599c18010 2024-11-23T06:36:37,573 DEBUG [M:0;df2f15951535:37617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a54274d6ba6b451dadccd510005ee214 is 69, key is df2f15951535,36413,1732343795518/rs:state/1732343795988/Put/seqid=0 2024-11-23T06:36:37,573 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=Closed, path=null 2024-11-23T06:36:37,573 INFO [RS:0;df2f15951535:36413 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:36:37,573 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36413-0x101666889fe0001, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:36:37,573 INFO [RS:0;df2f15951535:36413 {}] regionserver.HRegionServer(1031): Exiting; stopping=df2f15951535,36413,1732343795518; zookeeper connection closed. 2024-11-23T06:36:37,574 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@44a8dc72 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@44a8dc72 2024-11-23T06:36:37,574 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T06:36:37,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741839_1015 (size=5156) 2024-11-23T06:36:37,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741839_1015 (size=5156) 2024-11-23T06:36:37,578 INFO [M:0;df2f15951535:37617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a54274d6ba6b451dadccd510005ee214 2024-11-23T06:36:37,600 DEBUG [M:0;df2f15951535:37617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3ad717357c184537b59363990b5ff1a0 is 52, key is load_balancer_on/state:d/1732343796766/Put/seqid=0 2024-11-23T06:36:37,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741840_1016 (size=5056) 2024-11-23T06:36:37,606 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T06:36:37,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741840_1016 (size=5056) 2024-11-23T06:36:37,606 INFO [M:0;df2f15951535:37617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3ad717357c184537b59363990b5ff1a0 2024-11-23T06:36:37,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:36:37,614 DEBUG [M:0;df2f15951535:37617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14b62c37bb874ee1840383ccbb89d0b7 as hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/14b62c37bb874ee1840383ccbb89d0b7 2024-11-23T06:36:37,620 INFO [M:0;df2f15951535:37617 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/14b62c37bb874ee1840383ccbb89d0b7, entries=8, sequenceid=29, filesize=5.5 K 2024-11-23T06:36:37,622 DEBUG [M:0;df2f15951535:37617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/22b28ab2b0b0417f81496c9599c18010 as hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/22b28ab2b0b0417f81496c9599c18010 2024-11-23T06:36:37,629 INFO [M:0;df2f15951535:37617 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/22b28ab2b0b0417f81496c9599c18010, entries=3, sequenceid=29, filesize=5.2 K 2024-11-23T06:36:37,630 DEBUG [M:0;df2f15951535:37617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a54274d6ba6b451dadccd510005ee214 as hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a54274d6ba6b451dadccd510005ee214 2024-11-23T06:36:37,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:36:37,638 INFO [M:0;df2f15951535:37617 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a54274d6ba6b451dadccd510005ee214, entries=1, sequenceid=29, filesize=5.0 K 2024-11-23T06:36:37,639 DEBUG [M:0;df2f15951535:37617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3ad717357c184537b59363990b5ff1a0 as hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3ad717357c184537b59363990b5ff1a0 2024-11-23T06:36:37,646 INFO [M:0;df2f15951535:37617 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/0cc40936-ae9b-807a-833b-d444515fcde4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3ad717357c184537b59363990b5ff1a0, entries=1, sequenceid=29, filesize=4.9 K 2024-11-23T06:36:37,647 INFO [M:0;df2f15951535:37617 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=29, compaction requested=false 2024-11-23T06:36:37,648 INFO [M:0;df2f15951535:37617 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:37,649 DEBUG [M:0;df2f15951535:37617 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343797502Disabling compacts and flushes for region at 1732343797502Disabling writes for close at 1732343797502Obtaining lock to block concurrent updates at 1732343797502Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732343797502Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732343797502Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732343797503 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732343797503Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732343797518 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732343797518Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732343797530 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732343797545 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732343797545Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732343797558 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732343797572 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732343797572Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732343797585 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732343797599 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732343797599Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@697d94d0: reopening flushed file at 1732343797612 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a1fb404: reopening flushed file at 1732343797620 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fdba48d: reopening flushed file at 1732343797629 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27d25742: reopening flushed file at 1732343797638 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=29, compaction requested=false at 1732343797647 (+9 ms)Writing region close event to WAL at 1732343797648 (+1 ms)Closed at 1732343797648 2024-11-23T06:36:37,649 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:37,649 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:37,649 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:37,649 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:37,649 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:37,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41149 is added to blk_1073741830_1006 (size=10311) 2024-11-23T06:36:37,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45925 is added to blk_1073741830_1006 (size=10311) 2024-11-23T06:36:37,653 INFO [M:0;df2f15951535:37617 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T06:36:37,653 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T06:36:37,653 INFO [M:0;df2f15951535:37617 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37617 2024-11-23T06:36:37,653 INFO [M:0;df2f15951535:37617 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:36:37,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:36:37,763 INFO [M:0;df2f15951535:37617 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:36:37,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37617-0x101666889fe0000, quorum=127.0.0.1:62289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:36:37,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b5e52bc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:37,765 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@882842c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:36:37,766 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:36:37,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@573af0f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:36:37,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d5e070a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/hadoop.log.dir/,STOPPED} 2024-11-23T06:36:37,767 WARN [BP-358645030-172.17.0.3-1732343792728 heartbeating to localhost/127.0.0.1:39029 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:36:37,767 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:36:37,767 WARN [BP-358645030-172.17.0.3-1732343792728 heartbeating to localhost/127.0.0.1:39029 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-358645030-172.17.0.3-1732343792728 (Datanode Uuid 90a1ee71-f90f-4f62-b3d1-c086b6600764) service to localhost/127.0.0.1:39029 2024-11-23T06:36:37,767 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:36:37,768 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/data/data3/current/BP-358645030-172.17.0.3-1732343792728 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:37,768 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/data/data4/current/BP-358645030-172.17.0.3-1732343792728 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:37,768 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:36:37,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2cd60cfb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:37,774 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f88e14b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:36:37,774 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:36:37,774 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e7873b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:36:37,774 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa9c156{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/hadoop.log.dir/,STOPPED} 2024-11-23T06:36:37,775 WARN [BP-358645030-172.17.0.3-1732343792728 heartbeating to localhost/127.0.0.1:39029 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:36:37,775 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
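[Editor's note] The WARN/ERROR entries above show each datanode's block pool service and command processor threads ending because the whole MiniDFSCluster is being shut down. The test whose teardown is logged here, testLogRollOnDatanodeDeath, provokes a similar "datanode gone" condition deliberately while the cluster stays up. The sketch below is an assumption-based illustration (getDFSCluster on HBaseTestingUtil and the class name are assumed), not the test's own code.

// Sketch: stopping a single datanode in the backing MiniDFSCluster to simulate
// datanode death while the namenode and remaining datanodes keep running.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
  static void killOneDatanode(HBaseTestingUtil util) {
    MiniDFSCluster dfs = util.getDFSCluster();
    // Stops datanode 0; its BPServiceActor and command processor threads end,
    // producing log entries like the ones above for that node only.
    dfs.stopDataNode(0);
  }
}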
2024-11-23T06:36:37,775 WARN [BP-358645030-172.17.0.3-1732343792728 heartbeating to localhost/127.0.0.1:39029 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-358645030-172.17.0.3-1732343792728 (Datanode Uuid d9b0e659-fcaf-4b11-a00e-11e31a2fabab) service to localhost/127.0.0.1:39029 2024-11-23T06:36:37,775 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:36:37,776 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/data/data1/current/BP-358645030-172.17.0.3-1732343792728 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:37,776 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/cluster_4f0b55b7-8ffe-091c-1e01-57e45a59b36f/data/data2/current/BP-358645030-172.17.0.3-1732343792728 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:37,777 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:36:37,782 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ce0de36{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:36:37,783 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3614f662{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:36:37,783 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:36:37,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a15ed6a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:36:37,783 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@641eaf99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/hadoop.log.dir/,STOPPED} 2024-11-23T06:36:37,789 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T06:36:37,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T06:36:37,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T06:36:37,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/hadoop.log.dir so I do NOT create it in target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26 2024-11-23T06:36:37,806 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/580d8c4a-298a-ec9d-01ef-7c453c5d3fc7/hadoop.tmp.dir so I do NOT create it in target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26 2024-11-23T06:36:37,806 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10, deleteOnExit=true 2024-11-23T06:36:37,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T06:36:37,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/test.cache.data in system properties and HBase conf 2024-11-23T06:36:37,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T06:36:37,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir in system properties and HBase conf 2024-11-23T06:36:37,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T06:36:37,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T06:36:37,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T06:36:37,807 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T06:36:37,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:36:37,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:36:37,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T06:36:37,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:36:37,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T06:36:37,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T06:36:37,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:36:37,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:36:37,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T06:36:37,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/nfs.dump.dir in system properties and HBase conf 2024-11-23T06:36:37,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/java.io.tmpdir in system properties and HBase conf 2024-11-23T06:36:37,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:36:37,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T06:36:37,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T06:36:37,820 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:36:38,014 INFO [regionserver/df2f15951535:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:36:38,138 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:38,144 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:36:38,148 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:36:38,148 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:36:38,148 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:36:38,149 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:38,149 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ab5393f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:36:38,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ac253d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:36:38,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2295376c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/java.io.tmpdir/jetty-localhost-38721-hadoop-hdfs-3_4_1-tests_jar-_-any-11968311576765238349/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:36:38,246 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54adbc26{HTTP/1.1, (http/1.1)}{localhost:38721} 2024-11-23T06:36:38,246 INFO [Time-limited test {}] server.Server(415): Started @104243ms 2024-11-23T06:36:38,258 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:36:38,618 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:38,623 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:36:38,624 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:36:38,624 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:36:38,624 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:36:38,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@372d60ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:36:38,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a18c5e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:36:38,719 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bba803f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/java.io.tmpdir/jetty-localhost-34825-hadoop-hdfs-3_4_1-tests_jar-_-any-824963136254445443/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:38,720 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7629a449{HTTP/1.1, (http/1.1)}{localhost:34825} 2024-11-23T06:36:38,720 INFO [Time-limited test {}] server.Server(415): Started @104716ms 2024-11-23T06:36:38,721 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:36:38,748 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:38,752 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:36:38,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:36:38,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:36:38,753 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:36:38,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@217a95d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:36:38,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c64d82b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:36:38,851 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3efce601{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/java.io.tmpdir/jetty-localhost-32881-hadoop-hdfs-3_4_1-tests_jar-_-any-12921956397829639147/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:38,851 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e79a3d4{HTTP/1.1, (http/1.1)}{localhost:32881} 2024-11-23T06:36:38,851 INFO [Time-limited test {}] server.Server(415): Started @104848ms 2024-11-23T06:36:38,853 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:36:40,010 WARN [Thread-666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data1/current/BP-1232200571-172.17.0.3-1732343797832/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:40,011 WARN [Thread-667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data2/current/BP-1232200571-172.17.0.3-1732343797832/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:40,031 WARN [Thread-630 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T06:36:40,033 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5218688b966dfb6b with lease ID 0x436432b1c7dff459: Processing first storage report for DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7 from datanode DatanodeRegistration(127.0.0.1:41079, datanodeUuid=8e8699bc-1a9f-4cf4-886d-cb607aa1f071, infoPort=35323, infoSecurePort=0, ipcPort=35317, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832) 2024-11-23T06:36:40,033 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5218688b966dfb6b with lease ID 0x436432b1c7dff459: from storage DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7 node DatanodeRegistration(127.0.0.1:41079, datanodeUuid=8e8699bc-1a9f-4cf4-886d-cb607aa1f071, infoPort=35323, infoSecurePort=0, ipcPort=35317, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:40,034 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5218688b966dfb6b with lease ID 0x436432b1c7dff459: Processing first storage report for DS-21706c32-0b6c-464c-bf91-cfa310a95b8a from datanode DatanodeRegistration(127.0.0.1:41079, datanodeUuid=8e8699bc-1a9f-4cf4-886d-cb607aa1f071, infoPort=35323, infoSecurePort=0, ipcPort=35317, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832) 2024-11-23T06:36:40,034 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5218688b966dfb6b with lease ID 0x436432b1c7dff459: from storage DS-21706c32-0b6c-464c-bf91-cfa310a95b8a node DatanodeRegistration(127.0.0.1:41079, datanodeUuid=8e8699bc-1a9f-4cf4-886d-cb607aa1f071, infoPort=35323, infoSecurePort=0, ipcPort=35317, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:40,132 WARN [Thread-677 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data3/current/BP-1232200571-172.17.0.3-1732343797832/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:40,132 WARN [Thread-678 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data4/current/BP-1232200571-172.17.0.3-1732343797832/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:40,156 WARN [Thread-653 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T06:36:40,159 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ae0e3ddf1d91d70 with lease ID 0x436432b1c7dff45a: Processing first storage report for DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79 from datanode DatanodeRegistration(127.0.0.1:34055, datanodeUuid=d5007248-62f8-432a-8a26-b16b6214d840, infoPort=41269, infoSecurePort=0, ipcPort=46661, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832) 2024-11-23T06:36:40,159 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ae0e3ddf1d91d70 with lease ID 0x436432b1c7dff45a: from storage DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79 node DatanodeRegistration(127.0.0.1:34055, datanodeUuid=d5007248-62f8-432a-8a26-b16b6214d840, infoPort=41269, infoSecurePort=0, ipcPort=46661, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:40,159 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ae0e3ddf1d91d70 with lease ID 0x436432b1c7dff45a: Processing first storage report for DS-68e2c096-89a5-4934-88e1-342e428b19cc from datanode DatanodeRegistration(127.0.0.1:34055, datanodeUuid=d5007248-62f8-432a-8a26-b16b6214d840, infoPort=41269, infoSecurePort=0, ipcPort=46661, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832) 2024-11-23T06:36:40,159 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ae0e3ddf1d91d70 with lease ID 0x436432b1c7dff45a: from storage DS-68e2c096-89a5-4934-88e1-342e428b19cc node DatanodeRegistration(127.0.0.1:34055, datanodeUuid=d5007248-62f8-432a-8a26-b16b6214d840, infoPort=41269, infoSecurePort=0, ipcPort=46661, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:40,197 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26 2024-11-23T06:36:40,202 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/zookeeper_0, clientPort=62386, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T06:36:40,204 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62386 2024-11-23T06:36:40,204 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:40,206 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:40,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34055 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:36:40,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:36:40,217 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8 with version=8 2024-11-23T06:36:40,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/hbase-staging 2024-11-23T06:36:40,220 INFO [Time-limited test {}] client.ConnectionUtils(128): master/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:36:40,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:40,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:40,220 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:36:40,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:40,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:36:40,220 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T06:36:40,221 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:36:40,221 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35247 2024-11-23T06:36:40,223 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35247 connecting to ZooKeeper ensemble=127.0.0.1:62386 2024-11-23T06:36:40,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:352470x0, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:36:40,279 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35247-0x10166689d340000 connected 2024-11-23T06:36:40,368 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:40,370 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:40,373 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:36:40,373 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8, hbase.cluster.distributed=false 2024-11-23T06:36:40,376 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:36:40,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35247 2024-11-23T06:36:40,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35247 2024-11-23T06:36:40,378 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35247 2024-11-23T06:36:40,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35247 2024-11-23T06:36:40,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35247 2024-11-23T06:36:40,399 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:36:40,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:40,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:40,399 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:36:40,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:40,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:36:40,399 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T06:36:40,399 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:36:40,400 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41671 2024-11-23T06:36:40,401 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41671 connecting to ZooKeeper ensemble=127.0.0.1:62386 2024-11-23T06:36:40,402 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:40,403 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:40,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:416710x0, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:36:40,418 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41671-0x10166689d340001 connected 2024-11-23T06:36:40,418 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:36:40,418 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T06:36:40,419 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T06:36:40,420 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T06:36:40,422 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:36:40,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41671 2024-11-23T06:36:40,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41671 2024-11-23T06:36:40,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41671 2024-11-23T06:36:40,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41671 2024-11-23T06:36:40,424 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41671 2024-11-23T06:36:40,441 DEBUG [M:0;df2f15951535:35247 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;df2f15951535:35247 2024-11-23T06:36:40,441 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/df2f15951535,35247,1732343800220 2024-11-23T06:36:40,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:36:40,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:36:40,452 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/df2f15951535,35247,1732343800220 2024-11-23T06:36:40,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T06:36:40,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:40,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:40,463 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T06:36:40,464 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/df2f15951535,35247,1732343800220 from backup master directory 2024-11-23T06:36:40,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/df2f15951535,35247,1732343800220 2024-11-23T06:36:40,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:36:40,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:36:40,473 WARN [master/df2f15951535:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T06:36:40,473 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=df2f15951535,35247,1732343800220 2024-11-23T06:36:40,477 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/hbase.id] with ID: 152306c7-a7ab-4b4c-a197-92facc129627 2024-11-23T06:36:40,477 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/.tmp/hbase.id 2024-11-23T06:36:40,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:36:40,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34055 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:36:40,484 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/.tmp/hbase.id]:[hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/hbase.id] 2024-11-23T06:36:40,497 INFO [master/df2f15951535:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:40,498 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T06:36:40,499 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-23T06:36:40,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:40,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:40,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:36:40,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34055 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:36:40,520 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:36:40,521 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T06:36:40,521 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:36:40,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:36:40,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34055 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:36:40,532 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store 2024-11-23T06:36:40,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:36:40,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34055 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:36:40,541 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:36:40,541 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:36:40,541 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:40,541 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:40,542 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:36:40,542 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:36:40,542 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T06:36:40,542 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343800541Disabling compacts and flushes for region at 1732343800541Disabling writes for close at 1732343800542 (+1 ms)Writing region close event to WAL at 1732343800542Closed at 1732343800542 2024-11-23T06:36:40,543 WARN [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/.initializing 2024-11-23T06:36:40,543 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220 2024-11-23T06:36:40,546 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C35247%2C1732343800220, suffix=, logDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220, archiveDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/oldWALs, maxLogs=10 2024-11-23T06:36:40,546 INFO [master/df2f15951535:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C35247%2C1732343800220.1732343800546 2024-11-23T06:36:40,551 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343800546 2024-11-23T06:36:40,552 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41269:41269),(127.0.0.1/127.0.0.1:35323:35323)] 2024-11-23T06:36:40,552 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:36:40,553 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:36:40,553 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,553 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T06:36:40,556 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:40,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:40,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T06:36:40,558 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:40,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:36:40,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,560 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T06:36:40,560 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:40,561 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:36:40,561 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,562 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T06:36:40,562 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:40,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:36:40,563 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,564 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,564 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,566 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,566 DEBUG [master/df2f15951535:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,567 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T06:36:40,568 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:36:40,572 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:36:40,572 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841477, jitterRate=0.06999355554580688}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T06:36:40,574 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732343800553Initializing all the Stores at 1732343800554 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343800554Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343800554Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343800554Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343800554Cleaning up temporary data from old regions at 1732343800566 (+12 ms)Region opened successfully at 1732343800574 (+8 ms) 2024-11-23T06:36:40,574 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T06:36:40,578 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2614c8e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:36:40,579 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T06:36:40,579 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T06:36:40,579 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T06:36:40,580 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T06:36:40,580 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T06:36:40,581 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T06:36:40,581 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T06:36:40,583 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T06:36:40,584 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T06:36:40,596 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T06:36:40,596 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T06:36:40,597 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T06:36:40,609 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T06:36:40,610 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T06:36:40,612 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T06:36:40,620 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T06:36:40,622 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T06:36:40,631 DEBUG 
[master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T06:36:40,637 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T06:36:40,649 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T06:36:40,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:36:40,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:36:40,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:40,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:40,660 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=df2f15951535,35247,1732343800220, sessionid=0x10166689d340000, setting cluster-up flag (Was=false) 2024-11-23T06:36:40,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:40,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:40,715 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T06:36:40,716 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,35247,1732343800220 2024-11-23T06:36:40,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:40,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:40,768 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T06:36:40,772 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,35247,1732343800220 2024-11-23T06:36:40,776 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T06:36:40,780 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T06:36:40,781 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T06:36:40,781 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T06:36:40,782 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: df2f15951535,35247,1732343800220 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T06:36:40,784 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:36:40,784 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:36:40,784 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:36:40,784 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:36:40,784 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/df2f15951535:0, corePoolSize=10, maxPoolSize=10 2024-11-23T06:36:40,784 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,784 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:36:40,784 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/df2f15951535:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T06:36:40,785 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732343830785 2024-11-23T06:36:40,785 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T06:36:40,785 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T06:36:40,785 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T06:36:40,786 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T06:36:40,786 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T06:36:40,786 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T06:36:40,786 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:40,786 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:36:40,786 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T06:36:40,787 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T06:36:40,787 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T06:36:40,787 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T06:36:40,787 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T06:36:40,787 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T06:36:40,788 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343800787,5,FailOnTimeoutGroup] 2024-11-23T06:36:40,788 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:40,788 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343800788,5,FailOnTimeoutGroup] 2024-11-23T06:36:40,788 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-23T06:36:40,788 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T06:36:40,788 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:40,788 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:40,788 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T06:36:40,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34055 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:36:40,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:36:40,797 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T06:36:40,798 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8 2024-11-23T06:36:40,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34055 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:36:40,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:36:40,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:36:40,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:36:40,807 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:36:40,807 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:40,808 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:40,808 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-23T06:36:40,809 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:36:40,810 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:40,810 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:40,810 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:36:40,812 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:36:40,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:40,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:40,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:36:40,815 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:36:40,815 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:40,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:40,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:36:40,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740 2024-11-23T06:36:40,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740 2024-11-23T06:36:40,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:36:40,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:36:40,818 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
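The FlushLargeStoresPolicy entry above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the hbase:meta descriptor, so the flush lower bound falls back to memStoreFlushHeapSize divided by the number of families (16.0 M here). A minimal sketch, assuming only the standard hbase-client descriptor API, of pinning that bound on a hypothetical user table (the table name "example" and the 64 MB value are illustrative, not taken from this run):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundExample {
        // Builds a descriptor for a hypothetical "example" table with the
        // per-column-family flush lower bound (property name taken from the log) set to 64 MB.
        static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(64L * 1024 * 1024))
                .build();
        }
    }

With the bound set explicitly, FlushLargeStoresPolicy would no longer need the per-family fallback the log reports.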
2024-11-23T06:36:40,820 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:36:40,822 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:36:40,823 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824963, jitterRate=0.04899528622627258}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:36:40,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732343800805Initializing all the Stores at 1732343800806 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343800806Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343800806Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343800806Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343800806Cleaning up temporary data from old regions at 1732343800818 (+12 ms)Region opened successfully at 1732343800823 (+5 ms) 2024-11-23T06:36:40,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:36:40,824 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:36:40,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:36:40,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:36:40,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:36:40,824 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:36:40,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343800824Disabling compacts and flushes for region at 1732343800824Disabling writes for close at 1732343800824Writing region close 
event to WAL at 1732343800824Closed at 1732343800824 2024-11-23T06:36:40,826 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:36:40,826 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T06:36:40,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T06:36:40,827 INFO [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(746): ClusterId : 152306c7-a7ab-4b4c-a197-92facc129627 2024-11-23T06:36:40,827 DEBUG [RS:0;df2f15951535:41671 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T06:36:40,828 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:36:40,829 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T06:36:40,839 DEBUG [RS:0;df2f15951535:41671 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T06:36:40,839 DEBUG [RS:0;df2f15951535:41671 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T06:36:40,852 DEBUG [RS:0;df2f15951535:41671 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T06:36:40,853 DEBUG [RS:0;df2f15951535:41671 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@675b9aaf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:36:40,870 DEBUG [RS:0;df2f15951535:41671 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;df2f15951535:41671 2024-11-23T06:36:40,870 INFO [RS:0;df2f15951535:41671 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T06:36:40,870 INFO [RS:0;df2f15951535:41671 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T06:36:40,870 DEBUG [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T06:36:40,871 INFO [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(2659): reportForDuty to master=df2f15951535,35247,1732343800220 with port=41671, startcode=1732343800399 2024-11-23T06:36:40,871 DEBUG [RS:0;df2f15951535:41671 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T06:36:40,873 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52337, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T06:36:40,874 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35247 {}] master.ServerManager(363): Checking decommissioned status of RegionServer df2f15951535,41671,1732343800399 2024-11-23T06:36:40,874 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35247 {}] master.ServerManager(517): Registering regionserver=df2f15951535,41671,1732343800399 2024-11-23T06:36:40,876 DEBUG [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8 2024-11-23T06:36:40,876 DEBUG [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41685 2024-11-23T06:36:40,876 DEBUG [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T06:36:40,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:36:40,884 DEBUG [RS:0;df2f15951535:41671 {}] zookeeper.ZKUtil(111): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/df2f15951535,41671,1732343800399 2024-11-23T06:36:40,884 WARN [RS:0;df2f15951535:41671 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T06:36:40,884 INFO [RS:0;df2f15951535:41671 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:36:40,884 DEBUG [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399 2024-11-23T06:36:40,884 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [df2f15951535,41671,1732343800399] 2024-11-23T06:36:40,888 INFO [RS:0;df2f15951535:41671 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T06:36:40,890 INFO [RS:0;df2f15951535:41671 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T06:36:40,890 INFO [RS:0;df2f15951535:41671 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T06:36:40,890 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
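Around this point the new region server reports its effective memstore and compaction-throughput limits (globalMemStoreLimit=880 M, compaction throughput 100 MB/s upper / 50 MB/s lower) and instantiates the FSHLogProvider for its WAL. A hedged sketch of the Configuration keys that conventionally control those values; the key names are the standard HBase ones to the best of my knowledge, and the values are illustrative rather than the ones used in this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndWalConfigSketch {
        static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of heap usable by all memstores (the log shows an 880 MB effective limit).
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            // WAL provider; "filesystem" corresponds to the FSHLogProvider seen in the log.
            conf.set("hbase.wal.provider", "filesystem");
            // Compaction throughput bounds (the log reports 100 MB/s upper, 50 MB/s lower).
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            return conf;
        }
    }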
2024-11-23T06:36:40,891 INFO [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T06:36:40,892 INFO [RS:0;df2f15951535:41671 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T06:36:40,892 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:40,892 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,892 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,892 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,892 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,892 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,892 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:36:40,892 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,892 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,892 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,892 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,893 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,893 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:40,893 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:36:40,893 DEBUG [RS:0;df2f15951535:41671 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:36:40,894 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-23T06:36:40,894 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:40,894 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:40,894 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:40,894 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:40,894 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41671,1732343800399-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:36:40,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:36:40,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T06:36:40,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-23T06:36:40,909 INFO [RS:0;df2f15951535:41671 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T06:36:40,909 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41671,1732343800399-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:40,909 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:40,910 INFO [RS:0;df2f15951535:41671 {}] regionserver.Replication(171): df2f15951535,41671,1732343800399 started 2024-11-23T06:36:40,924 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:36:40,925 INFO [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(1482): Serving as df2f15951535,41671,1732343800399, RpcServer on df2f15951535/172.17.0.3:41671, sessionid=0x10166689d340001 2024-11-23T06:36:40,925 DEBUG [RS:0;df2f15951535:41671 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T06:36:40,925 DEBUG [RS:0;df2f15951535:41671 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager df2f15951535,41671,1732343800399 2024-11-23T06:36:40,925 DEBUG [RS:0;df2f15951535:41671 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,41671,1732343800399' 2024-11-23T06:36:40,925 DEBUG [RS:0;df2f15951535:41671 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T06:36:40,926 DEBUG [RS:0;df2f15951535:41671 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T06:36:40,926 DEBUG [RS:0;df2f15951535:41671 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T06:36:40,926 DEBUG [RS:0;df2f15951535:41671 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T06:36:40,926 DEBUG [RS:0;df2f15951535:41671 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager df2f15951535,41671,1732343800399 2024-11-23T06:36:40,926 DEBUG [RS:0;df2f15951535:41671 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,41671,1732343800399' 2024-11-23T06:36:40,926 DEBUG [RS:0;df2f15951535:41671 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T06:36:40,927 DEBUG [RS:0;df2f15951535:41671 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T06:36:40,927 DEBUG [RS:0;df2f15951535:41671 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T06:36:40,927 INFO [RS:0;df2f15951535:41671 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T06:36:40,927 INFO [RS:0;df2f15951535:41671 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T06:36:40,980 WARN [df2f15951535:35247 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-23T06:36:41,030 INFO [RS:0;df2f15951535:41671 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C41671%2C1732343800399, suffix=, logDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399, archiveDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs, maxLogs=32 2024-11-23T06:36:41,032 INFO [RS:0;df2f15951535:41671 {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41671%2C1732343800399.1732343801031 2024-11-23T06:36:41,038 INFO [RS:0;df2f15951535:41671 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 2024-11-23T06:36:41,044 DEBUG [RS:0;df2f15951535:41671 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35323:35323),(127.0.0.1/127.0.0.1:41269:41269)] 2024-11-23T06:36:41,230 DEBUG [df2f15951535:35247 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T06:36:41,231 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=df2f15951535,41671,1732343800399 2024-11-23T06:36:41,234 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,41671,1732343800399, state=OPENING 2024-11-23T06:36:41,280 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T06:36:41,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:41,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:36:41,296 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:36:41,296 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,41671,1732343800399}] 2024-11-23T06:36:41,296 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:36:41,296 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:36:41,454 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T06:36:41,460 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54117, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T06:36:41,467 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T06:36:41,467 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:36:41,470 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C41671%2C1732343800399.meta, suffix=.meta, logDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399, archiveDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs, maxLogs=32 2024-11-23T06:36:41,472 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta 2024-11-23T06:36:41,480 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta 2024-11-23T06:36:41,488 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41269:41269),(127.0.0.1/127.0.0.1:35323:35323)] 2024-11-23T06:36:41,492 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:36:41,492 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T06:36:41,492 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T06:36:41,493 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
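Both WAL configuration entries above report blocksize=256 MB, rollsize=128 MB and maxLogs=32; the roll size is the block size scaled by a roll multiplier. A hedged sketch of the configuration keys conventionally behind those numbers (key names assumed from standard HBase configuration, values copied from the log purely for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollingConfigSketch {
        static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // WAL block size; the log reports 256 MB.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            // Roll when the WAL reaches blocksize * multiplier (0.5 * 256 MB = the 128 MB rollsize above).
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Maximum number of WAL files before flushes are forced; the log reports 32.
            conf.setInt("hbase.regionserver.maxlogs", 32);
            return conf;
        }
    }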
2024-11-23T06:36:41,493 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T06:36:41,493 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:36:41,493 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T06:36:41,493 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T06:36:41,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:36:41,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:36:41,496 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:41,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:41,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:36:41,497 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:36:41,497 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:41,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:41,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:36:41,499 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:36:41,499 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:41,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:36:41,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:36:41,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:36:41,500 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:41,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
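The CompactionConfiguration entries repeated above for each column family (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0) map onto the usual store-compaction tuning keys. A hedged sketch using the conventional key names, with the values copied from the log only as an illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize in the log
            conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // selection ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
            return conf;
        }
    }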
2024-11-23T06:36:41,501 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:36:41,502 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740 2024-11-23T06:36:41,503 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740 2024-11-23T06:36:41,504 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:36:41,504 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:36:41,505 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T06:36:41,507 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:36:41,508 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=724681, jitterRate=-0.07852154970169067}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:36:41,508 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T06:36:41,530 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732343801493Writing region info on filesystem at 1732343801493Initializing all the Stores at 1732343801494 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343801494Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343801494Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343801494Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343801494Cleaning up temporary data from old regions at 1732343801504 (+10 ms)Running coprocessor post-open hooks at 1732343801508 (+4 ms)Region opened successfully at 1732343801529 (+21 ms) 2024-11-23T06:36:41,531 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732343801454 2024-11-23T06:36:41,534 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T06:36:41,534 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T06:36:41,535 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,41671,1732343800399 2024-11-23T06:36:41,536 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,41671,1732343800399, state=OPEN 2024-11-23T06:36:41,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:36:41,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:36:41,659 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=df2f15951535,41671,1732343800399 2024-11-23T06:36:41,659 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:36:41,659 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:36:41,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T06:36:41,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,41671,1732343800399 in 363 msec 2024-11-23T06:36:41,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T06:36:41,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 838 msec 2024-11-23T06:36:41,671 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:36:41,671 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T06:36:41,673 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:36:41,673 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,41671,1732343800399, seqNum=-1] 2024-11-23T06:36:41,673 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:36:41,674 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46967, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:36:41,680 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 901 msec 2024-11-23T06:36:41,680 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732343801680, completionTime=-1 2024-11-23T06:36:41,680 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T06:36:41,680 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T06:36:41,682 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T06:36:41,682 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732343861682 2024-11-23T06:36:41,682 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732343921682 2024-11-23T06:36:41,682 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-23T06:36:41,683 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,35247,1732343800220-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,683 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,35247,1732343800220-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,683 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,35247,1732343800220-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,683 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-df2f15951535:35247, period=300000, unit=MILLISECONDS is enabled. 
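InitMetaProcedure finishes here by creating the built-in 'default' and 'hbase' namespaces before the master completes initialization. A small client-side sketch, using only the standard Connection/Admin API, for confirming those namespaces exist once the cluster is up (connection configuration assumed to point at this cluster):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Expect at least the 'default' and 'hbase' namespaces created by InitMetaProcedure.
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println(ns.getName());
                }
            }
        }
    }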
2024-11-23T06:36:41,683 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,683 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,685 DEBUG [master/df2f15951535:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T06:36:41,687 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.214sec 2024-11-23T06:36:41,687 INFO [master/df2f15951535:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T06:36:41,687 INFO [master/df2f15951535:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T06:36:41,687 INFO [master/df2f15951535:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T06:36:41,687 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T06:36:41,687 INFO [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T06:36:41,687 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,35247,1732343800220-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:36:41,687 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,35247,1732343800220-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T06:36:41,690 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T06:36:41,690 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T06:36:41,690 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,35247,1732343800220-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:36:41,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bb75251, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:36:41,731 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request df2f15951535,35247,-1 for getting cluster id 2024-11-23T06:36:41,731 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T06:36:41,734 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '152306c7-a7ab-4b4c-a197-92facc129627' 2024-11-23T06:36:41,734 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T06:36:41,734 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "152306c7-a7ab-4b4c-a197-92facc129627" 2024-11-23T06:36:41,735 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@356369f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:36:41,735 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [df2f15951535,35247,-1] 2024-11-23T06:36:41,736 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T06:36:41,736 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:36:41,738 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55160, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T06:36:41,739 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19f5a74a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:36:41,739 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:36:41,741 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,41671,1732343800399, seqNum=-1] 2024-11-23T06:36:41,741 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:36:41,743 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33912, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:36:41,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=df2f15951535,35247,1732343800220 2024-11-23T06:36:41,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:41,750 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T06:36:41,768 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:36:41,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:41,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:41,768 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:36:41,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:36:41,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:36:41,768 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T06:36:41,768 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:36:41,769 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44895 2024-11-23T06:36:41,770 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44895 connecting to ZooKeeper ensemble=127.0.0.1:62386 2024-11-23T06:36:41,771 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:41,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:36:41,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:448950x0, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:36:41,797 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:44895-0x10166689d340002, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-23T06:36:41,797 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44895-0x10166689d340002 connected 2024-11-23T06:36:41,797 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-23T06:36:41,798 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T06:36:41,798 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-11-23T06:36:41,799 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:44895-0x10166689d340002, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T06:36:41,801 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44895-0x10166689d340002, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:36:41,802 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44895 2024-11-23T06:36:41,802 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44895 2024-11-23T06:36:41,804 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44895 2024-11-23T06:36:41,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44895 2024-11-23T06:36:41,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44895 2024-11-23T06:36:41,807 INFO [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(746): ClusterId : 152306c7-a7ab-4b4c-a197-92facc129627 2024-11-23T06:36:41,807 DEBUG [RS:1;df2f15951535:44895 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T06:36:41,821 DEBUG [RS:1;df2f15951535:44895 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T06:36:41,821 DEBUG [RS:1;df2f15951535:44895 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T06:36:41,831 DEBUG [RS:1;df2f15951535:44895 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T06:36:41,832 DEBUG [RS:1;df2f15951535:44895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a535180, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:36:41,844 DEBUG [RS:1;df2f15951535:44895 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;df2f15951535:44895 2024-11-23T06:36:41,844 INFO [RS:1;df2f15951535:44895 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T06:36:41,844 INFO [RS:1;df2f15951535:44895 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T06:36:41,844 DEBUG [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T06:36:41,845 INFO [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(2659): reportForDuty to master=df2f15951535,35247,1732343800220 with port=44895, startcode=1732343801767 2024-11-23T06:36:41,845 DEBUG [RS:1;df2f15951535:44895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T06:36:41,847 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48087, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T06:36:41,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35247 {}] master.ServerManager(363): Checking decommissioned status of RegionServer df2f15951535,44895,1732343801767 2024-11-23T06:36:41,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35247 {}] master.ServerManager(517): Registering regionserver=df2f15951535,44895,1732343801767 2024-11-23T06:36:41,849 DEBUG [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8 2024-11-23T06:36:41,849 DEBUG [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41685 2024-11-23T06:36:41,849 DEBUG [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T06:36:41,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:36:41,860 DEBUG [RS:1;df2f15951535:44895 {}] zookeeper.ZKUtil(111): regionserver:44895-0x10166689d340002, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/df2f15951535,44895,1732343801767 2024-11-23T06:36:41,860 WARN [RS:1;df2f15951535:44895 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T06:36:41,860 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [df2f15951535,44895,1732343801767] 2024-11-23T06:36:41,860 INFO [RS:1;df2f15951535:44895 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:36:41,860 DEBUG [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767 2024-11-23T06:36:41,864 INFO [RS:1;df2f15951535:44895 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T06:36:41,867 INFO [RS:1;df2f15951535:44895 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T06:36:41,867 INFO [RS:1;df2f15951535:44895 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T06:36:41,867 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
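The compaction throughput line above (higher bound 100.00 MB/second, lower bound 50.00 MB/second) is reported by PressureAwareCompactionThroughputController. A hedged sketch of the configuration keys that usually drive those bounds follows; the key names are the commonly documented ones and are an assumption here, not values read back from this run's hbase-site.xml.

// Hedged sketch: configuration keys commonly used to set the compaction
// throughput bounds logged above (values shown match the 100 MB/s / 50 MB/s
// figures in the log; the key names are an assumption, not read from this run).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionThroughputConfig {
  static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}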
2024-11-23T06:36:41,868 INFO [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T06:36:41,869 INFO [RS:1;df2f15951535:44895 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T06:36:41,869 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,869 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,869 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,869 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,869 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,869 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,869 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:36:41,870 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,870 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,870 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,870 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,870 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,870 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:36:41,870 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:36:41,870 DEBUG [RS:1;df2f15951535:44895 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:36:41,871 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-23T06:36:41,871 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,871 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,871 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,871 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,871 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44895,1732343801767-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:36:41,884 INFO [RS:1;df2f15951535:44895 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T06:36:41,884 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44895,1732343801767-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,884 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,884 INFO [RS:1;df2f15951535:44895 {}] regionserver.Replication(171): df2f15951535,44895,1732343801767 started 2024-11-23T06:36:41,897 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:36:41,898 INFO [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(1482): Serving as df2f15951535,44895,1732343801767, RpcServer on df2f15951535/172.17.0.3:44895, sessionid=0x10166689d340002 2024-11-23T06:36:41,898 DEBUG [RS:1;df2f15951535:44895 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T06:36:41,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;df2f15951535:44895,5,FailOnTimeoutGroup] 2024-11-23T06:36:41,898 DEBUG [RS:1;df2f15951535:44895 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager df2f15951535,44895,1732343801767 2024-11-23T06:36:41,898 DEBUG [RS:1;df2f15951535:44895 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,44895,1732343801767' 2024-11-23T06:36:41,898 DEBUG [RS:1;df2f15951535:44895 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T06:36:41,898 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-23T06:36:41,898 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T06:36:41,898 DEBUG [RS:1;df2f15951535:44895 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T06:36:41,899 DEBUG [RS:1;df2f15951535:44895 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T06:36:41,899 DEBUG [RS:1;df2f15951535:44895 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T06:36:41,899 DEBUG [RS:1;df2f15951535:44895 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
df2f15951535,44895,1732343801767 2024-11-23T06:36:41,899 DEBUG [RS:1;df2f15951535:44895 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,44895,1732343801767' 2024-11-23T06:36:41,899 DEBUG [RS:1;df2f15951535:44895 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T06:36:41,899 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is df2f15951535,35247,1732343800220 2024-11-23T06:36:41,900 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@25cff401 2024-11-23T06:36:41,900 DEBUG [RS:1;df2f15951535:44895 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T06:36:41,900 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T06:36:41,900 DEBUG [RS:1;df2f15951535:44895 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T06:36:41,900 INFO [RS:1;df2f15951535:44895 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T06:36:41,900 INFO [RS:1;df2f15951535:44895 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T06:36:41,902 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37152, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T06:36:41,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35247 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-23T06:36:41,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35247 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
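The two TableDescriptorChecker warnings show the incoming descriptor carries MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, deliberately tiny values that force frequent flushes and WAL activity for this test. Below is a sketch of how such a descriptor is typically built and submitted with the public client API; it is an illustration of the same effect, not the test's literal code.

// Hedged sketch: building a descriptor with the tiny MAX_FILESIZE and
// MEMSTORE_FLUSHSIZE values warned about above and submitting it via Admin.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class CreateTinyFlushTable {
  static void create(Admin admin) throws java.io.IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432)        // triggers the MAX_FILESIZE warning
        .setMemStoreFlushSize(8192)    // triggers the MEMSTORE_FLUSHSIZE warning
        .build();
    admin.createTable(desc);           // submits a CreateTableProcedure, as with pid=4 below
  }
}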
2024-11-23T06:36:41,902 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35247 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:36:41,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35247 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-23T06:36:41,905 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T06:36:41,905 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:41,905 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35247 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-23T06:36:41,906 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T06:36:41,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35247 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T06:36:41,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741835_1011 (size=393) 2024-11-23T06:36:41,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34055 is added to blk_1073741835_1011 (size=393) 2024-11-23T06:36:41,915 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 506342cf61e627db185b553c0e4ba305, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8 2024-11-23T06:36:41,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34055 is added to blk_1073741836_1012 (size=76) 2024-11-23T06:36:41,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41079 is added to blk_1073741836_1012 (size=76) 2024-11-23T06:36:41,923 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:36:41,923 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 506342cf61e627db185b553c0e4ba305, disabling compactions & flushes 2024-11-23T06:36:41,923 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:36:41,923 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:36:41,923 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. after waiting 0 ms 2024-11-23T06:36:41,923 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:36:41,923 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:36:41,923 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 506342cf61e627db185b553c0e4ba305: Waiting for close lock at 1732343801923Disabling compacts and flushes for region at 1732343801923Disabling writes for close at 1732343801923Writing region close event to WAL at 1732343801923Closed at 1732343801923 2024-11-23T06:36:41,925 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T06:36:41,926 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732343801925"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732343801925"}]},"ts":"1732343801925"} 2024-11-23T06:36:41,929 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-23T06:36:41,930 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T06:36:41,930 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732343801930"}]},"ts":"1732343801930"} 2024-11-23T06:36:41,933 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-23T06:36:41,933 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=506342cf61e627db185b553c0e4ba305, ASSIGN}] 2024-11-23T06:36:41,935 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=506342cf61e627db185b553c0e4ba305, ASSIGN 2024-11-23T06:36:41,936 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=506342cf61e627db185b553c0e4ba305, ASSIGN; state=OFFLINE, location=df2f15951535,41671,1732343800399; forceNewPlan=false, retain=false 2024-11-23T06:36:42,003 INFO [RS:1;df2f15951535:44895 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C44895%2C1732343801767, suffix=, logDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767, archiveDir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs, maxLogs=32 2024-11-23T06:36:42,004 INFO [RS:1;df2f15951535:44895 {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C44895%2C1732343801767.1732343802004 2024-11-23T06:36:42,011 INFO [RS:1;df2f15951535:44895 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 2024-11-23T06:36:42,014 DEBUG [RS:1;df2f15951535:44895 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41269:41269),(127.0.0.1/127.0.0.1:35323:35323)] 2024-11-23T06:36:42,087 INFO [df2f15951535:35247 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-23T06:36:42,088 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=506342cf61e627db185b553c0e4ba305, regionState=OPENING, regionLocation=df2f15951535,41671,1732343800399 2024-11-23T06:36:42,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=506342cf61e627db185b553c0e4ba305, ASSIGN because future has completed 2024-11-23T06:36:42,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 506342cf61e627db185b553c0e4ba305, server=df2f15951535,41671,1732343800399}] 2024-11-23T06:36:42,264 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:36:42,264 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 506342cf61e627db185b553c0e4ba305, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:36:42,265 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,265 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:36:42,265 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,265 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,266 INFO [StoreOpener-506342cf61e627db185b553c0e4ba305-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,268 INFO [StoreOpener-506342cf61e627db185b553c0e4ba305-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 506342cf61e627db185b553c0e4ba305 columnFamilyName info 2024-11-23T06:36:42,268 DEBUG [StoreOpener-506342cf61e627db185b553c0e4ba305-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:36:42,269 INFO [StoreOpener-506342cf61e627db185b553c0e4ba305-1 {}] regionserver.HStore(327): Store=506342cf61e627db185b553c0e4ba305/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:36:42,269 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,269 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,270 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,270 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,270 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,272 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,275 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:36:42,275 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 506342cf61e627db185b553c0e4ba305; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729039, jitterRate=-0.07297930121421814}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T06:36:42,275 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 506342cf61e627db185b553c0e4ba305 2024-11-23T06:36:42,276 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 506342cf61e627db185b553c0e4ba305: Running coprocessor pre-open hook at 1732343802265Writing region info on filesystem at 1732343802265Initializing all the Stores at 1732343802266 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343802266Cleaning up temporary data from old regions at 1732343802270 (+4 ms)Running coprocessor post-open hooks at 1732343802275 (+5 ms)Region opened successfully at 1732343802276 (+1 ms) 2024-11-23T06:36:42,277 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305., pid=6, masterSystemTime=1732343802252 2024-11-23T06:36:42,279 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:36:42,279 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:36:42,280 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=506342cf61e627db185b553c0e4ba305, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,41671,1732343800399 2024-11-23T06:36:42,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 506342cf61e627db185b553c0e4ba305, server=df2f15951535,41671,1732343800399 because future has completed 2024-11-23T06:36:42,287 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T06:36:42,287 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 506342cf61e627db185b553c0e4ba305, server=df2f15951535,41671,1732343800399 in 188 msec 2024-11-23T06:36:42,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T06:36:42,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=506342cf61e627db185b553c0e4ba305, ASSIGN in 354 msec 2024-11-23T06:36:42,292 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T06:36:42,292 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732343802292"}]},"ts":"1732343802292"} 2024-11-23T06:36:42,295 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-23T06:36:42,297 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T06:36:42,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 395 msec 2024-11-23T06:36:46,410 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T06:36:46,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:36:46,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:36:46,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:36:46,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:36:46,889 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-23T06:36:50,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T06:36:50,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T06:36:50,909 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-23T06:36:50,909 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-23T06:36:50,911 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:36:50,911 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T06:36:51,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35247 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T06:36:51,929 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-23T06:36:51,929 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-23T06:36:51,936 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-23T06:36:51,936 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:36:51,949 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:51,953 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:36:51,954 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:36:51,954 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:36:51,954 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:36:51,955 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5dc4ef73{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:36:51,955 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@157a3fca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:36:52,051 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2801262{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/java.io.tmpdir/jetty-localhost-37005-hadoop-hdfs-3_4_1-tests_jar-_-any-6907639047579581816/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:52,051 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fb911ed{HTTP/1.1, (http/1.1)}{localhost:37005} 2024-11-23T06:36:52,051 INFO [Time-limited test {}] server.Server(415): Started @118048ms 2024-11-23T06:36:52,052 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:36:52,082 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:52,085 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:36:52,086 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:36:52,086 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:36:52,086 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:36:52,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bea65f7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:36:52,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ccc1bc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:36:52,179 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@66046020{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/java.io.tmpdir/jetty-localhost-41277-hadoop-hdfs-3_4_1-tests_jar-_-any-10052217326454592137/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:52,180 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64b86931{HTTP/1.1, (http/1.1)}{localhost:41277} 2024-11-23T06:36:52,180 INFO [Time-limited test {}] server.Server(415): Started @118177ms 2024-11-23T06:36:52,181 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:36:52,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:36:52,212 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:36:52,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:36:52,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:36:52,213 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:36:52,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@158a9d8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:36:52,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3247fd57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:36:52,305 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@befca3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/java.io.tmpdir/jetty-localhost-36337-hadoop-hdfs-3_4_1-tests_jar-_-any-16252294169769021422/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:52,306 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@339da5f9{HTTP/1.1, (http/1.1)}{localhost:36337} 2024-11-23T06:36:52,306 INFO [Time-limited test {}] server.Server(415): Started @118302ms 2024-11-23T06:36:52,307 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
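The three Jetty "datanode" web contexts started between 06:36:51,949 and 06:36:52,307 appear to be extra DataNodes being added to the mini DFS cluster before one of the original pipeline nodes is taken down (this is testLogRollOnDatanodeDeath). A sketch of how additional DataNodes are commonly added in this kind of test, assuming access to the MiniDFSCluster handle; the method and variable names below are illustrative of the usual approach, not lifted from this test's source.

// Hedged sketch: adding extra DataNodes to a running MiniDFSCluster, the usual
// way tests produce additional Jetty "datanode" contexts like the ones above
// before killing a node.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public final class AddDataNodes {
  static void addThree(MiniDFSCluster dfsCluster, Configuration conf) throws Exception {
    // start three more DataNodes with managed data dirs and default racks
    dfsCluster.startDataNodes(conf, 3, true, null, null);
    dfsCluster.waitActive();
  }
}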
2024-11-23T06:36:53,835 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7/current/BP-1232200571-172.17.0.3-1732343797832/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:53,836 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8/current/BP-1232200571-172.17.0.3-1732343797832/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:53,836 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data5/current/BP-1232200571-172.17.0.3-1732343797832/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:53,836 WARN [Thread-866 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data6/current/BP-1232200571-172.17.0.3-1732343797832/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:53,858 WARN [Thread-824 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:36:53,858 WARN [Thread-802 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T06:36:53,861 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc4e929935ae5414 with lease ID 0x436432b1c7dff45c: Processing first storage report for DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461 from datanode DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832) 2024-11-23T06:36:53,861 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc4e929935ae5414 with lease ID 0x436432b1c7dff45c: from storage DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461 node DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:53,861 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2b2ea10f8e549cd with lease ID 0x436432b1c7dff45b: Processing first storage report for DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2 from datanode DatanodeRegistration(127.0.0.1:33139, datanodeUuid=a1b48651-abe7-40b2-ada9-65a795c28f0f, infoPort=34637, infoSecurePort=0, ipcPort=36461, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832) 2024-11-23T06:36:53,861 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b2ea10f8e549cd with lease ID 0x436432b1c7dff45b: from storage DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2 node DatanodeRegistration(127.0.0.1:33139, datanodeUuid=a1b48651-abe7-40b2-ada9-65a795c28f0f, infoPort=34637, infoSecurePort=0, ipcPort=36461, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:53,861 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc4e929935ae5414 with lease ID 0x436432b1c7dff45c: Processing first storage report for DS-612b7879-7fbf-45a3-bbe2-dc1a34044cac from datanode DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832) 2024-11-23T06:36:53,861 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc4e929935ae5414 with lease ID 0x436432b1c7dff45c: from storage DS-612b7879-7fbf-45a3-bbe2-dc1a34044cac node DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:53,861 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2b2ea10f8e549cd with lease ID 0x436432b1c7dff45b: Processing first storage report for DS-68b7c65f-8117-42d4-8882-dbe88b8e5eb2 from datanode DatanodeRegistration(127.0.0.1:33139, datanodeUuid=a1b48651-abe7-40b2-ada9-65a795c28f0f, infoPort=34637, infoSecurePort=0, ipcPort=36461, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832) 2024-11-23T06:36:53,861 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x2b2ea10f8e549cd with lease ID 0x436432b1c7dff45b: from storage DS-68b7c65f-8117-42d4-8882-dbe88b8e5eb2 node DatanodeRegistration(127.0.0.1:33139, datanodeUuid=a1b48651-abe7-40b2-ada9-65a795c28f0f, infoPort=34637, infoSecurePort=0, ipcPort=36461, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:53,874 WARN [Thread-883 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data9/current/BP-1232200571-172.17.0.3-1732343797832/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:53,874 WARN [Thread-884 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data10/current/BP-1232200571-172.17.0.3-1732343797832/current, will proceed with Du for space computation calculation, 2024-11-23T06:36:53,892 WARN [Thread-846 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:36:53,894 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x60d4b17febf0ce45 with lease ID 0x436432b1c7dff45d: Processing first storage report for DS-0b08c883-3263-451a-944d-6e835950ad14 from datanode DatanodeRegistration(127.0.0.1:45047, datanodeUuid=37169f15-79e2-414b-b207-3b80a26e70b1, infoPort=37713, infoSecurePort=0, ipcPort=42521, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832) 2024-11-23T06:36:53,894 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x60d4b17febf0ce45 with lease ID 0x436432b1c7dff45d: from storage DS-0b08c883-3263-451a-944d-6e835950ad14 node DatanodeRegistration(127.0.0.1:45047, datanodeUuid=37169f15-79e2-414b-b207-3b80a26e70b1, infoPort=37713, infoSecurePort=0, ipcPort=42521, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:53,894 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x60d4b17febf0ce45 with lease ID 0x436432b1c7dff45d: Processing first storage report for DS-95190532-c9f1-4480-9099-e8212b4d4df6 from datanode DatanodeRegistration(127.0.0.1:45047, datanodeUuid=37169f15-79e2-414b-b207-3b80a26e70b1, infoPort=37713, infoSecurePort=0, ipcPort=42521, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832) 2024-11-23T06:36:53,894 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x60d4b17febf0ce45 with lease ID 0x436432b1c7dff45d: from storage DS-95190532-c9f1-4480-9099-e8212b4d4df6 node DatanodeRegistration(127.0.0.1:45047, datanodeUuid=37169f15-79e2-414b-b207-3b80a26e70b1, infoPort=37713, infoSecurePort=0, ipcPort=42521, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:36:53,944 WARN [ResponseProcessor for block BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for 
BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,944 WARN [ResponseProcessor for block BP-1232200571-172.17.0.3-1732343797832:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1232200571-172.17.0.3-1732343797832:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,944 WARN [ResponseProcessor for block BP-1232200571-172.17.0.3-1732343797832:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1232200571-172.17.0.3-1732343797832:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,945 WARN [ResponseProcessor for block BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,945 WARN [DataStreamer for file /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta block BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK], DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 
2024-11-23T06:36:53,945 WARN [DataStreamer for file /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 block BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK], DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:36:53,945 WARN [DataStreamer for file /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343800546 block BP-1232200571-172.17.0.3-1732343797832:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK], DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:36:53,946 WARN [DataStreamer for file /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 block BP-1232200571-172.17.0.3-1732343797832:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK], DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:36:53,946 WARN [PacketResponder: BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34055] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:36:53,946 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-120231856_22 at /127.0.0.1:59184 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59184 dst: /127.0.0.1:34055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:36:53,947 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1185269503_22 at /127.0.0.1:54940 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54940 dst: /127.0.0.1:34055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:36:53,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1185269503_22 at /127.0.0.1:49194 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41079:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49194 dst: /127.0.0.1:41079 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:36:53,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:44288 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41079:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44288 dst: /127.0.0.1:41079 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:36:53,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3efce601{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:53,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-120231856_22 at /127.0.0.1:44268 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41079:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44268 dst: /127.0.0.1:41079 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:36:53,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:59220 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59220 dst: /127.0.0.1:34055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:36:53,949 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e79a3d4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:36:53,949 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:36:53,949 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:44302 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41079:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44302 dst: /127.0.0.1:41079 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:36:53,949 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:59210 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59210 dst: /127.0.0.1:34055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:36:53,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c64d82b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:36:53,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@217a95d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,STOPPED} 2024-11-23T06:36:53,951 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:36:53,951 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T06:36:53,951 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1232200571-172.17.0.3-1732343797832 (Datanode Uuid d5007248-62f8-432a-8a26-b16b6214d840) service to localhost/127.0.0.1:41685 2024-11-23T06:36:53,951 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:36:53,952 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data4/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:53,952 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data3/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:53,953 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:36:53,953 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@3d55efa1 {}] datanode.DataXceiver(331): 127.0.0.1:41079:DataXceiver error processing unknown operation src: /127.0.0.1:43520 dst: /127.0.0.1:41079 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:36:53,953 WARN [DataStreamer for file /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 block BP-1232200571-172.17.0.3-1732343797832:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,953 WARN [DataStreamer for file /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343800546 block BP-1232200571-172.17.0.3-1732343797832:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:36:53,954 WARN [DataStreamer for file /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 block BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,956 WARN [DataStreamer for file /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta block BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bba803f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:53,958 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7629a449{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:36:53,958 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:36:53,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a18c5e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:36:53,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@372d60ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,STOPPED} 2024-11-23T06:36:53,959 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:36:53,959 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:36:53,959 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:36:53,959 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1232200571-172.17.0.3-1732343797832 (Datanode Uuid 8e8699bc-1a9f-4cf4-886d-cb607aa1f071) service to localhost/127.0.0.1:41685 2024-11-23T06:36:53,960 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data1/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:53,960 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data2/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:53,960 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:36:53,964 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305., hostname=df2f15951535,41671,1732343800399, seqNum=2] 2024-11-23T06:36:53,966 ERROR [FSHLog-0-hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8-prefix:df2f15951535,41671,1732343800399 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,966 WARN [FSHLog-0-hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8-prefix:df2f15951535,41671,1732343800399 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,966 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,967 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C41671%2C1732343800399:(num 1732343801031) roll requested 2024-11-23T06:36:53,967 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41671%2C1732343800399.1732343813967 2024-11-23T06:36:53,973 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:53,973 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:53,973 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:53,973 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:53,973 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:53,973 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343813967 2024-11-23T06:36:53,974 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,974 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:53,974 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37713:37713),(127.0.0.1/127.0.0.1:34637:34637)] 2024-11-23T06:36:53,974 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 is not closed yet, will try archiving it next time 2024-11-23T06:36:53,975 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-23T06:36:53,975 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-23T06:36:53,975 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 2024-11-23T06:36:53,978 WARN [IPC Server handler 0 on default port 41685 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-23T06:36:53,981 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 after 4ms 2024-11-23T06:36:54,523 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:55,872 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:55,974 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:55,976 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343813967 2024-11-23T06:36:55,977 WARN [ResponseProcessor for block BP-1232200571-172.17.0.3-1732343797832:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1232200571-172.17.0.3-1732343797832:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:55,978 WARN [DataStreamer for file /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343813967 block BP-1232200571-172.17.0.3-1732343797832:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:36:55,978 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:41658 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:45047:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41658 dst: /127.0.0.1:45047 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:36:55,979 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:38702 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33139:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38702 dst: /127.0.0.1:33139 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:36:56,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@befca3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:36:56,075 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@339da5f9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:36:56,075 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:36:56,075 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3247fd57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:36:56,076 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@158a9d8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,STOPPED} 2024-11-23T06:36:56,078 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:36:56,078 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T06:36:56,078 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1232200571-172.17.0.3-1732343797832 (Datanode Uuid 37169f15-79e2-414b-b207-3b80a26e70b1) service to localhost/127.0.0.1:41685 2024-11-23T06:36:56,078 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:36:56,079 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data9/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:56,079 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data10/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:36:56,080 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:36:56,524 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:57,872 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:57,975 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:57,976 WARN [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]] 2024-11-23T06:36:57,976 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C41671%2C1732343800399:(num 1732343813967) roll requested 2024-11-23T06:36:57,977 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41671%2C1732343800399.1732343817976 2024-11-23T06:36:57,982 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 after 4007ms 2024-11-23T06:36:57,983 WARN [Thread-904 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:57,983 WARN [Thread-904 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:36:57,983 WARN [Thread-904 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741839_1021 2024-11-23T06:36:57,987 WARN [Thread-904 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] 2024-11-23T06:36:57,990 WARN [Thread-904 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:57,990 WARN [Thread-904 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]) is bad. 2024-11-23T06:36:57,990 WARN [Thread-904 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741840_1022 2024-11-23T06:36:57,990 WARN [Thread-904 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK] 2024-11-23T06:36:57,993 WARN [Thread-904 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45047 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:57,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48704 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741841_1023 to mirror 127.0.0.1:45047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:36:57,993 WARN [Thread-904 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:36:57,993 WARN [Thread-904 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741841_1023 2024-11-23T06:36:57,993 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48704 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T06:36:57,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48704 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48704 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:36:57,994 WARN [Thread-904 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:36:58,001 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:58,001 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:58,001 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:58,001 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:58,002 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:36:58,002 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343813967 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343817976 2024-11-23T06:36:58,003 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44083:44083),(127.0.0.1/127.0.0.1:34637:34637)] 2024-11-23T06:36:58,003 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 is not closed yet, will try archiving it next time 2024-11-23T06:36:58,003 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343813967 is not closed yet, will try archiving it next time 2024-11-23T06:36:58,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33139 is added to blk_1073741838_1020 (size=2431) 2024-11-23T06:36:58,087 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T06:36:58,406 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 is not closed yet, will try archiving it next time 2024-11-23T06:36:58,525 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:59,873 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:36:59,880 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@64beaf3e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33139, datanodeUuid=a1b48651-abe7-40b2-ada9-65a795c28f0f, infoPort=34637, infoSecurePort=0, ipcPort=36461, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741838_1020 to 127.0.0.1:45047 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:00,003 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:00,093 WARN [ResponseProcessor for block BP-1232200571-172.17.0.3-1732343797832:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1232200571-172.17.0.3-1732343797832:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-1232200571-172.17.0.3-1732343797832:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:00,094 WARN [DataStreamer for file /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343817976 block BP-1232200571-172.17.0.3-1732343797832:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 2024-11-23T06:37:00,095 WARN [PacketResponder: BP-1232200571-172.17.0.3-1732343797832:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33139] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:00,096 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48720 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48720 dst: /127.0.0.1:43327 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:00,096 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:38730 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33139:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38730 dst: /127.0.0.1:33139 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:00,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2801262{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:00,098 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fb911ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:37:00,099 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:37:00,099 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@157a3fca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:37:00,099 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5dc4ef73{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,STOPPED} 2024-11-23T06:37:00,100 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:37:00,100 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1232200571-172.17.0.3-1732343797832 (Datanode Uuid a1b48651-abe7-40b2-ada9-65a795c28f0f) service to localhost/127.0.0.1:41685 2024-11-23T06:37:00,100 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:37:00,101 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:37:00,101 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data5/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:00,102 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data6/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:00,102 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:37:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41671 {}] regionserver.HRegion(8855): Flush requested on 506342cf61e627db185b553c0e4ba305 2024-11-23T06:37:00,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 506342cf61e627db185b553c0e4ba305 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T06:37:00,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/62b4817e431547e48ad0e6cb4f3f6c85 is 1080, key is row0002/info:/1732343816082/Put/seqid=0 2024-11-23T06:37:00,132 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:00,132 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK], DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 
2024-11-23T06:37:00,132 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741843_1026 2024-11-23T06:37:00,133 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] 2024-11-23T06:37:00,134 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:00,134 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:37:00,134 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741844_1027 2024-11-23T06:37:00,135 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:37:00,137 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:00,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48748 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741845_1028 to mirror 127.0.0.1:33139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:00,137 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 2024-11-23T06:37:00,137 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741845_1028 2024-11-23T06:37:00,137 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48748 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T06:37:00,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48748 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48748 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:00,138 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:00,139 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:00,140 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]) is bad. 
2024-11-23T06:37:00,140 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741846_1029 2024-11-23T06:37:00,140 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK] 2024-11-23T06:37:00,141 WARN [IPC Server handler 0 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T06:37:00,141 WARN [IPC Server handler 0 on default port 41685 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T06:37:00,141 WARN [IPC Server handler 0 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T06:37:00,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741847_1030 (size=10347) 2024-11-23T06:37:00,525 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:00,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/62b4817e431547e48ad0e6cb4f3f6c85 2024-11-23T06:37:00,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/62b4817e431547e48ad0e6cb4f3f6c85 as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/62b4817e431547e48ad0e6cb4f3f6c85 2024-11-23T06:37:00,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/62b4817e431547e48ad0e6cb4f3f6c85, entries=5, sequenceid=11, filesize=10.1 K 2024-11-23T06:37:00,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 506342cf61e627db185b553c0e4ba305 in 449ms, sequenceid=11, compaction requested=false 2024-11-23T06:37:00,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 506342cf61e627db185b553c0e4ba305: 2024-11-23T06:37:00,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41671 {}] regionserver.HRegion(8855): Flush requested on 506342cf61e627db185b553c0e4ba305 2024-11-23T06:37:00,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 506342cf61e627db185b553c0e4ba305 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-23T06:37:00,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/0eeba0f14de24ff3aff591284767410e is 1080, key is row0007/info:/1732343820112/Put/seqid=0 2024-11-23T06:37:00,755 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34055 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:00,755 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48774 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741848_1031 to mirror 127.0.0.1:34055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:00,755 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:37:00,755 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48774 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T06:37:00,755 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741848_1031 2024-11-23T06:37:00,755 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48774 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48774 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:00,756 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] 2024-11-23T06:37:00,758 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45047 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:00,758 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48790 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741849_1032 to mirror 127.0.0.1:45047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:00,758 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:37:00,758 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741849_1032 2024-11-23T06:37:00,758 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48790 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T06:37:00,759 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:48790 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48790 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:00,759 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:37:00,761 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:00,761 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]) is bad. 2024-11-23T06:37:00,761 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741850_1033 2024-11-23T06:37:00,762 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK] 2024-11-23T06:37:00,763 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:00,764 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 
2024-11-23T06:37:00,764 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741851_1034 2024-11-23T06:37:00,765 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:00,765 WARN [IPC Server handler 0 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T06:37:00,765 WARN [IPC Server handler 0 on default port 41685 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T06:37:00,765 WARN [IPC Server handler 0 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T06:37:00,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741852_1035 (size=12506) 2024-11-23T06:37:01,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/0eeba0f14de24ff3aff591284767410e 2024-11-23T06:37:01,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/0eeba0f14de24ff3aff591284767410e as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/0eeba0f14de24ff3aff591284767410e 2024-11-23T06:37:01,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/0eeba0f14de24ff3aff591284767410e, entries=7, sequenceid=24, filesize=12.2 K 2024-11-23T06:37:01,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 506342cf61e627db185b553c0e4ba305 in 443ms, sequenceid=24, compaction requested=false 2024-11-23T06:37:01,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
506342cf61e627db185b553c0e4ba305: 2024-11-23T06:37:01,184 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-23T06:37:01,184 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:01,184 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/0eeba0f14de24ff3aff591284767410e because midkey is the same as first or last row 2024-11-23T06:37:01,874 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,004 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,004 WARN [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]] 2024-11-23T06:37:02,004 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C41671%2C1732343800399:(num 1732343817976) roll requested 2024-11-23T06:37:02,005 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41671%2C1732343800399.1732343822004 2024-11-23T06:37:02,009 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,009 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK], DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:37:02,009 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741853_1036 2024-11-23T06:37:02,010 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:37:02,013 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,013 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 2024-11-23T06:37:02,013 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741854_1037 2024-11-23T06:37:02,014 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:02,017 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34055 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,017 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57522 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741855_1038 to mirror 127.0.0.1:34055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:02,018 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:37:02,018 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741855_1038 2024-11-23T06:37:02,018 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57522 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-23T06:37:02,018 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57522 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57522 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:02,018 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] 2024-11-23T06:37:02,020 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,020 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]) is bad. 
2024-11-23T06:37:02,020 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741856_1039 2024-11-23T06:37:02,021 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK] 2024-11-23T06:37:02,021 WARN [IPC Server handler 1 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T06:37:02,021 WARN [IPC Server handler 1 on default port 41685 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T06:37:02,021 WARN [IPC Server handler 1 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T06:37:02,024 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:02,024 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:02,024 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:02,024 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:02,024 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:02,025 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343817976 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343822004 2024-11-23T06:37:02,025 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44083:44083)] 2024-11-23T06:37:02,025 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 is not closed yet, will try archiving it next time 2024-11-23T06:37:02,025 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343817976 is not closed yet, will try archiving it next time 2024-11-23T06:37:02,026 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343813967 to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs/df2f15951535%2C41671%2C1732343800399.1732343813967 2024-11-23T06:37:02,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741842_1025 (size=25992) 2024-11-23T06:37:02,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41671 {}] regionserver.HRegion(8855): Flush requested on 506342cf61e627db185b553c0e4ba305 2024-11-23T06:37:02,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 506342cf61e627db185b553c0e4ba305 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-23T06:37:02,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/acfc69ecd8cc46f59de2ba71c5deffcc is 1079, key is tmprow/info:/1732343822166/Put/seqid=0 2024-11-23T06:37:02,179 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,179 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK], DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]) is bad. 2024-11-23T06:37:02,179 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741858_1041 2024-11-23T06:37:02,180 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK] 2024-11-23T06:37:02,181 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,181 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:37:02,181 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741859_1042 2024-11-23T06:37:02,181 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:37:02,182 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,183 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:37:02,183 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741860_1043 2024-11-23T06:37:02,183 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] 2024-11-23T06:37:02,184 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,184 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 2024-11-23T06:37:02,184 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741861_1044 2024-11-23T06:37:02,185 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:02,185 WARN [IPC Server handler 2 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T06:37:02,185 WARN [IPC Server handler 2 on default port 41685 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T06:37:02,185 WARN [IPC Server handler 2 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T06:37:02,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741862_1045 (size=6027) 2024-11-23T06:37:02,427 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 is not closed yet, will try archiving it next time 2024-11-23T06:37:02,526 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/acfc69ecd8cc46f59de2ba71c5deffcc 2024-11-23T06:37:02,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/acfc69ecd8cc46f59de2ba71c5deffcc as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/acfc69ecd8cc46f59de2ba71c5deffcc 2024-11-23T06:37:02,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/acfc69ecd8cc46f59de2ba71c5deffcc, entries=1, sequenceid=34, filesize=5.9 K 2024-11-23T06:37:02,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 506342cf61e627db185b553c0e4ba305 in 440ms, sequenceid=34, compaction requested=true 2024-11-23T06:37:02,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 506342cf61e627db185b553c0e4ba305: 2024-11-23T06:37:02,608 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-23T06:37:02,608 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:02,609 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/0eeba0f14de24ff3aff591284767410e because midkey is the same as first or last row 2024-11-23T06:37:02,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 506342cf61e627db185b553c0e4ba305:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:37:02,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:37:02,609 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-23T06:37:02,610 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:37:02,611 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HStore(1541): 506342cf61e627db185b553c0e4ba305/info is initiating minor compaction (all files) 2024-11-23T06:37:02,611 INFO [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 506342cf61e627db185b553c0e4ba305/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:37:02,611 INFO [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/62b4817e431547e48ad0e6cb4f3f6c85, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/0eeba0f14de24ff3aff591284767410e, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/acfc69ecd8cc46f59de2ba71c5deffcc] into tmpdir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp, totalSize=28.2 K 2024-11-23T06:37:02,611 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] compactions.Compactor(225): Compacting 62b4817e431547e48ad0e6cb4f3f6c85, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732343816082 2024-11-23T06:37:02,612 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0eeba0f14de24ff3aff591284767410e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732343820112 2024-11-23T06:37:02,613 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] compactions.Compactor(225): Compacting acfc69ecd8cc46f59de2ba71c5deffcc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732343822166 2024-11-23T06:37:02,630 INFO [RS:0;df2f15951535:41671-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 506342cf61e627db185b553c0e4ba305#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:37:02,630 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/025569dbc4884120a063b63cf4e510f4 is 1080, key is row0002/info:/1732343816082/Put/seqid=0 2024-11-23T06:37:02,633 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,633 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:37:02,633 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741863_1046 2024-11-23T06:37:02,634 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] 2024-11-23T06:37:02,635 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,635 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK], DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 2024-11-23T06:37:02,635 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741864_1047 2024-11-23T06:37:02,636 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:02,637 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,637 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK], DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]) is bad. 2024-11-23T06:37:02,638 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741865_1048 2024-11-23T06:37:02,638 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK] 2024-11-23T06:37:02,641 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45047 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:02,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57560 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741866_1049 to mirror 127.0.0.1:45047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:02,641 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:37:02,641 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741866_1049 2024-11-23T06:37:02,641 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57560 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T06:37:02,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57560 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57560 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:02,642 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:37:02,642 WARN [IPC Server handler 3 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T06:37:02,642 WARN [IPC Server handler 3 on default port 41685 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T06:37:02,642 WARN [IPC Server handler 3 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T06:37:02,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741867_1050 (size=17994) 2024-11-23T06:37:02,863 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bdcf597[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741847_1030 to 127.0.0.1:34055 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:02,863 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@608dcc62[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741852_1035 to 127.0.0.1:41079 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:03,055 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/025569dbc4884120a063b63cf4e510f4 as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/025569dbc4884120a063b63cf4e510f4 2024-11-23T06:37:03,065 INFO [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 506342cf61e627db185b553c0e4ba305/info of 506342cf61e627db185b553c0e4ba305 into 025569dbc4884120a063b63cf4e510f4(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 506342cf61e627db185b553c0e4ba305: 2024-11-23T06:37:03,065 INFO [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305., storeName=506342cf61e627db185b553c0e4ba305/info, priority=13, startTime=1732343822609; duration=0sec 2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/025569dbc4884120a063b63cf4e510f4 because midkey is the same as first or last row 2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/025569dbc4884120a063b63cf4e510f4 because midkey is the same as first or last row 2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/025569dbc4884120a063b63cf4e510f4 because midkey is the same as first or last row 2024-11-23T06:37:03,065 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:37:03,066 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 506342cf61e627db185b553c0e4ba305:info 2024-11-23T06:37:03,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41671 {}] regionserver.HRegion(8855): Flush requested on 506342cf61e627db185b553c0e4ba305 2024-11-23T06:37:03,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 506342cf61e627db185b553c0e4ba305 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-23T06:37:03,611 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/e11ea104418c463c82267df52c740759 is 1079, key is tmprow/info:/1732343823600/Put/seqid=0 2024-11-23T06:37:03,613 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:03,613 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:37:03,613 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741868_1051 2024-11-23T06:37:03,614 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:37:03,615 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
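The run of DataStreamer warnings here (createBlockOutputStream fails with Connection refused, "datanode 0 ... is bad", "Abandoning blk_...", "Excluding datanode ...") is a retry loop that keeps rebuilding the write pipeline from the datanodes not yet excluded. A self-contained sketch of that loop, with made-up node names and a hard-coded "only one node is still reachable" failure; this is not the actual DataStreamer code:

// Illustrative sketch only: exclude-and-retry pipeline setup.
import java.util.*;

public class PipelineRetrySketch {
    public static void main(String[] args) {
        List<String> allNodes = Arrays.asList("dn1:45047", "dn2:33139", "dn3:34055", "dn4:41079", "dn5:43327");
        Set<String> excluded = new HashSet<>();
        int replication = 2;

        for (int attempt = 1; attempt <= allNodes.size(); attempt++) {
            List<String> pipeline = choose(allNodes, excluded, replication);
            if (pipeline.size() < replication) {
                System.out.println("Failed to place enough replicas, still in need of "
                        + (replication - pipeline.size()) + " to reach " + replication);
                return;
            }
            String badNode = connect(pipeline);            // null means every node accepted the stream
            if (badNode == null) {
                System.out.println("Pipeline established: " + pipeline);
                return;
            }
            // Mirrors the log sequence: "datanode X is bad", "Abandoning blk_...", "Excluding datanode ..."
            System.out.println("datanode " + badNode + " is bad; abandoning block and excluding it");
            excluded.add(badNode);
        }
    }

    static List<String> choose(List<String> nodes, Set<String> excluded, int n) {
        List<String> picked = new ArrayList<>();
        for (String node : nodes) {
            if (!excluded.contains(node) && picked.size() < n) picked.add(node);
        }
        return picked;
    }

    static String connect(List<String> pipeline) {
        // Pretend only dn5:43327 is still reachable, as in the log above.
        for (String node : pipeline) {
            if (!node.equals("dn5:43327")) return node;    // "Connection refused"
        }
        return null;
    }
}

With four of the five nodes refusing connections, the loop bottoms out exactly like the namenode warnings below: one node left, still in need of 1 to reach 2.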
2024-11-23T06:37:03,615 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK], DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 2024-11-23T06:37:03,615 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741869_1052 2024-11-23T06:37:03,616 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:03,618 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:03,618 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:37:03,618 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741870_1053 2024-11-23T06:37:03,619 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] 2024-11-23T06:37:03,620 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:03,620 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]) is bad. 2024-11-23T06:37:03,620 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741871_1054 2024-11-23T06:37:03,621 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK] 2024-11-23T06:37:03,622 WARN [IPC Server handler 1 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T06:37:03,622 WARN [IPC Server handler 1 on default port 41685 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T06:37:03,622 WARN [IPC Server handler 1 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T06:37:03,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741872_1055 (size=6027) 2024-11-23T06:37:03,863 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@608dcc62[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741842_1025 to 127.0.0.1:33139 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:03,863 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bdcf597[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741862_1045 to 127.0.0.1:41079 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:03,874 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:04,026 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:04,026 WARN [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]] 2024-11-23T06:37:04,026 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/e11ea104418c463c82267df52c740759 2024-11-23T06:37:04,026 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C41671%2C1732343800399:(num 1732343822004) roll requested 2024-11-23T06:37:04,026 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41671%2C1732343800399.1732343824026 2024-11-23T06:37:04,030 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34055 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:04,030 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57584 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741873_1056 to mirror 127.0.0.1:34055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:04,030 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:37:04,031 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741873_1056 2024-11-23T06:37:04,031 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57584 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T06:37:04,031 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57584 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57584 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:04,031 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] 2024-11-23T06:37:04,032 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
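The FSHLog lines a little earlier ("All datanodes [...] are bad. Aborting...", "HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL") come down to a per-sync check of the surviving pipeline size against a minimum. A hedged sketch of that check with invented method names, not the FSHLog implementation:

// Illustrative sketch only: roll the WAL when the pipeline is too thin.
public class LowReplicaRollSketch {

    /** Decide whether the current writer should be rolled. */
    static boolean shouldRequestRoll(int currentPipelineReplicas, int minTolerableReplicas) {
        return currentPipelineReplicas < minTolerableReplicas;
    }

    public static void main(String[] args) {
        int replicasLeft = 1;      // only 127.0.0.1:43327 remains in the pipeline
        int minTolerable = 2;      // this test cluster expects at least two replicas

        if (shouldRequestRoll(replicasLeft, minTolerable)) {
            System.out.println("HDFS pipeline error detected. Found " + replicasLeft
                    + " replicas but expecting no less than " + minTolerable
                    + " replicas. Requesting close of WAL.");
        }
    }
}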
2024-11-23T06:37:04,032 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:37:04,032 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741874_1057 2024-11-23T06:37:04,033 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:37:04,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/e11ea104418c463c82267df52c740759 as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/e11ea104418c463c82267df52c740759 2024-11-23T06:37:04,035 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:04,035 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57596 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741875_1058 to mirror 127.0.0.1:33139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:04,035 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 2024-11-23T06:37:04,035 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741875_1058 2024-11-23T06:37:04,035 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57596 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T06:37:04,035 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57596 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57596 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:04,036 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:04,038 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41079 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:04,038 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57606 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741876_1059 to mirror 127.0.0.1:41079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:04,038 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]) is bad. 2024-11-23T06:37:04,038 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741876_1059 2024-11-23T06:37:04,038 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57606 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T06:37:04,038 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57606 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57606 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:04,039 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK] 2024-11-23T06:37:04,040 WARN [IPC Server handler 0 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T06:37:04,040 WARN [IPC Server handler 0 on default port 41685 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T06:37:04,040 WARN [IPC Server handler 0 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T06:37:04,040 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/e11ea104418c463c82267df52c740759, entries=1, sequenceid=45, filesize=5.9 K 2024-11-23T06:37:04,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 506342cf61e627db185b553c0e4ba305 in 439ms, sequenceid=45, compaction requested=false 2024-11-23T06:37:04,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 506342cf61e627db185b553c0e4ba305: 2024-11-23T06:37:04,041 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-23T06:37:04,041 DEBUG [MemStoreFlusher.0 {}] 
regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:04,041 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/025569dbc4884120a063b63cf4e510f4 because midkey is the same as first or last row 2024-11-23T06:37:04,043 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:04,043 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:04,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:04,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:04,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:04,043 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343822004 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343824026 2024-11-23T06:37:04,044 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44083:44083)] 2024-11-23T06:37:04,044 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 is not closed yet, will try archiving it next time 2024-11-23T06:37:04,044 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343822004 is not closed yet, will try archiving it next time 2024-11-23T06:37:04,044 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343817976 to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs/df2f15951535%2C41671%2C1732343800399.1732343817976 2024-11-23T06:37:04,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741857_1040 (size=13591) 2024-11-23T06:37:04,446 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 is not closed yet, will try archiving it next time 2024-11-23T06:37:04,527 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:05,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41671 {}] regionserver.HRegion(8855): Flush requested on 506342cf61e627db185b553c0e4ba305 2024-11-23T06:37:05,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 506342cf61e627db185b553c0e4ba305 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-23T06:37:05,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/6158fd01e7284c07bbb97fd9521a227e is 1079, key is tmprow/info:/1732343825035/Put/seqid=0 2024-11-23T06:37:05,048 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41079 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:05,048 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57626 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741878_1061 to mirror 127.0.0.1:41079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:05,048 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]) is bad. 2024-11-23T06:37:05,049 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741878_1061 2024-11-23T06:37:05,049 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57626 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T06:37:05,049 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57626 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57626 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:05,049 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK] 2024-11-23T06:37:05,050 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:05,050 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK], DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 2024-11-23T06:37:05,050 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741879_1062 2024-11-23T06:37:05,051 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:05,052 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:05,052 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:37:05,052 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741880_1063 2024-11-23T06:37:05,052 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:37:05,054 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34055 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:05,054 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57640 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741881_1064 to mirror 127.0.0.1:34055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:05,054 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:37:05,054 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741881_1064 2024-11-23T06:37:05,054 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57640 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T06:37:05,055 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57640 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57640 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:05,055 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] 2024-11-23T06:37:05,056 WARN [IPC Server handler 0 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T06:37:05,056 WARN [IPC Server handler 0 on default port 41685 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T06:37:05,056 WARN [IPC Server handler 0 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T06:37:05,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741882_1065 (size=6027) 2024-11-23T06:37:05,460 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/6158fd01e7284c07bbb97fd9521a227e 2024-11-23T06:37:05,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/6158fd01e7284c07bbb97fd9521a227e as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/6158fd01e7284c07bbb97fd9521a227e 
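The recurring split-policy DEBUG lines in this stretch ("Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K" followed by "cannot split ... because midkey is the same as first or last row") are a two-step decision: the store has crossed the size threshold, but a usable split point must differ from both the first and last row. A standalone sketch with invented sizes and keys, not the HBase policy classes themselves:

// Illustrative sketch only: size check, then midkey sanity check.
import java.util.Arrays;

public class SplitDecisionSketch {
    public static void main(String[] args) {
        long sumSizeBytes = 24_064;        // ~23.5 K of store files
        long sizeToCheck  = 16_384;        // 16.0 K threshold

        byte[] firstKey = "row0002".getBytes();
        byte[] lastKey  = "row0002".getBytes();   // a single hot row: midkey collapses onto it
        byte[] midKey   = "row0002".getBytes();

        boolean bigEnough = sumSizeBytes > sizeToCheck;
        boolean usableMidkey = !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);

        if (!bigEnough) {
            System.out.println("region too small to split");
        } else if (!usableMidkey) {
            System.out.println("cannot split because midkey is the same as first or last row");
        } else {
            System.out.println("split at " + new String(midKey));
        }
    }
}

Because the test keeps writing the same few rows, the midkey keeps collapsing onto the first/last row and the region never splits even though it exceeds the size threshold.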
2024-11-23T06:37:05,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/6158fd01e7284c07bbb97fd9521a227e, entries=1, sequenceid=55, filesize=5.9 K 2024-11-23T06:37:05,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 506342cf61e627db185b553c0e4ba305 in 437ms, sequenceid=55, compaction requested=true 2024-11-23T06:37:05,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 506342cf61e627db185b553c0e4ba305: 2024-11-23T06:37:05,474 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-23T06:37:05,474 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:05,474 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/025569dbc4884120a063b63cf4e510f4 because midkey is the same as first or last row 2024-11-23T06:37:05,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 506342cf61e627db185b553c0e4ba305:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:37:05,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:37:05,475 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:37:05,476 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:37:05,476 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HStore(1541): 506342cf61e627db185b553c0e4ba305/info is initiating minor compaction (all files) 2024-11-23T06:37:05,476 INFO [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 506342cf61e627db185b553c0e4ba305/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 
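The "Exploring compaction algorithm has selected 3 files of size 30048 ... with 1 in ratio" line reflects a windowed selection over the candidate store files. A simplified, self-contained sketch of the window-plus-ratio idea follows; the file sizes and the 1.2 ratio are invented, and the real policy has additional tie-breaks and fallbacks:

// Illustrative sketch only: keep a window only if no file in it is more than
// `ratio` times the combined size of the other files in the window.
import java.util.Arrays;

public class ExploringSelectionSketch {
    public static void main(String[] args) {
        long[] fileSizes = {8_000, 6_000, 6_000};   // invented sizes for three store files
        double ratio = 1.2;
        int minFiles = 3;

        int bestStart = -1, bestLen = 0;
        long bestTotal = Long.MAX_VALUE;

        for (int start = 0; start < fileSizes.length; start++) {
            for (int len = minFiles; start + len <= fileSizes.length; len++) {
                long[] window = Arrays.copyOfRange(fileSizes, start, start + len);
                if (!withinRatio(window, ratio)) continue;
                long total = Arrays.stream(window).sum();
                // Prefer windows with more files, then the cheaper rewrite.
                if (len > bestLen || (len == bestLen && total < bestTotal)) {
                    bestStart = start; bestLen = len; bestTotal = total;
                }
            }
        }
        if (bestStart < 0) {
            System.out.println("no window satisfied the ratio");
        } else {
            System.out.println("selected " + bestLen + " files of size " + bestTotal
                    + " starting at candidate #" + bestStart);
        }
    }

    static boolean withinRatio(long[] window, double ratio) {
        long total = Arrays.stream(window).sum();
        for (long size : window) {
            if (size > (total - size) * ratio) return false;   // one oversized file breaks the window
        }
        return true;
    }
}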
2024-11-23T06:37:05,477 INFO [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/025569dbc4884120a063b63cf4e510f4, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/e11ea104418c463c82267df52c740759, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/6158fd01e7284c07bbb97fd9521a227e] into tmpdir=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp, totalSize=29.3 K 2024-11-23T06:37:05,477 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] compactions.Compactor(225): Compacting 025569dbc4884120a063b63cf4e510f4, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732343816082 2024-11-23T06:37:05,478 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] compactions.Compactor(225): Compacting e11ea104418c463c82267df52c740759, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732343823600 2024-11-23T06:37:05,478 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6158fd01e7284c07bbb97fd9521a227e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732343825035 2024-11-23T06:37:05,494 INFO [RS:0;df2f15951535:41671-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 506342cf61e627db185b553c0e4ba305#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:37:05,495 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/812db2414c8243a4a7b8da018bcc8abf is 1080, key is row0002/info:/1732343816082/Put/seqid=0 2024-11-23T06:37:05,497 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:05,497 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK], DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]) is bad. 2024-11-23T06:37:05,497 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741883_1066 2024-11-23T06:37:05,497 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34055,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK] 2024-11-23T06:37:05,499 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:05,499 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57668 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741884_1067 to mirror 127.0.0.1:33139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:05,499 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 2024-11-23T06:37:05,499 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741884_1067 2024-11-23T06:37:05,499 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57668 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T06:37:05,500 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:57668 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57668 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:05,502 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:05,504 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:05,504 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:37:05,504 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741885_1068 2024-11-23T06:37:05,504 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:37:05,505 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:05,505 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]) is bad. 
2024-11-23T06:37:05,505 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741886_1069 2024-11-23T06:37:05,506 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK] 2024-11-23T06:37:05,506 WARN [IPC Server handler 0 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T06:37:05,506 WARN [IPC Server handler 0 on default port 41685 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T06:37:05,506 WARN [IPC Server handler 0 on default port 41685 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T06:37:05,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741887_1070 (size=18097) 2024-11-23T06:37:05,862 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@608dcc62[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741872_1055 to 127.0.0.1:41079 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:05,862 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bdcf597[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741867_1050 to 127.0.0.1:41079 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:05,874 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:05,925 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/812db2414c8243a4a7b8da018bcc8abf as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/812db2414c8243a4a7b8da018bcc8abf 2024-11-23T06:37:05,935 INFO [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 506342cf61e627db185b553c0e4ba305/info of 506342cf61e627db185b553c0e4ba305 into 812db2414c8243a4a7b8da018bcc8abf(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 506342cf61e627db185b553c0e4ba305: 2024-11-23T06:37:05,935 INFO [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305., storeName=506342cf61e627db185b553c0e4ba305/info, priority=13, startTime=1732343825475; duration=0sec 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/812db2414c8243a4a7b8da018bcc8abf because midkey is the same as first or last row 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/812db2414c8243a4a7b8da018bcc8abf because midkey is the same as first or last row 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/812db2414c8243a4a7b8da018bcc8abf because midkey is the same as first or last row 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:37:05,935 DEBUG [RS:0;df2f15951535:41671-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 506342cf61e627db185b553c0e4ba305:info 2024-11-23T06:37:06,044 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:06,044 WARN [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-23T06:37:06,066 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:37:06,071 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:37:06,072 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:37:06,072 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:37:06,072 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:37:06,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16f8dfc7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:37:06,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@441dcfc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:37:06,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6219e1b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/java.io.tmpdir/jetty-localhost-33875-hadoop-hdfs-3_4_1-tests_jar-_-any-17817354943611157040/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:06,219 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e00c8cb{HTTP/1.1, (http/1.1)}{localhost:33875} 2024-11-23T06:37:06,219 INFO [Time-limited test {}] server.Server(415): Started @132216ms 2024-11-23T06:37:06,220 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:37:06,527 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:06,601 WARN [Thread-983 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:37:06,609 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3da4b5bd51e4f98c with lease ID 0x436432b1c7dff45e: from storage DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79 node DatanodeRegistration(127.0.0.1:41755, datanodeUuid=d5007248-62f8-432a-8a26-b16b6214d840, infoPort=35227, infoSecurePort=0, ipcPort=37517, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:06,609 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3da4b5bd51e4f98c with lease ID 0x436432b1c7dff45e: from storage DS-68e2c096-89a5-4934-88e1-342e428b19cc node DatanodeRegistration(127.0.0.1:41755, datanodeUuid=d5007248-62f8-432a-8a26-b16b6214d840, infoPort=35227, infoSecurePort=0, ipcPort=37517, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T06:37:06,861 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bdcf597[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741857_1040 to 127.0.0.1:33139 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:06,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741882_1065 (size=6027) 2024-11-23T06:37:07,875 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:08,044 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:08,527 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:08,864 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@608dcc62[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43327, datanodeUuid=12a96f33-653c-43c5-a2f2-60cea5c5fb4b, infoPort=44083, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741887_1070 to 127.0.0.1:33139 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:09,875 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:10,045 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:10,196 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T06:37:10,528 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:10,788 ERROR [FSHLog-0-hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData-prefix:df2f15951535,35247,1732343800220 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:10,788 WARN [FSHLog-0-hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData-prefix:df2f15951535,35247,1732343800220 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:10,789 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C35247%2C1732343800220:(num 1732343800546) roll requested 2024-11-23T06:37:10,790 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C35247%2C1732343800220.1732343830790 2024-11-23T06:37:10,797 WARN [Thread-1004 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45047 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:10,797 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-120231856_22 at /127.0.0.1:57698 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741888_1071 to mirror 127.0.0.1:45047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:10,798 WARN [Thread-1004 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK]) is bad. 2024-11-23T06:37:10,798 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-120231856_22 at /127.0.0.1:57698 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T06:37:10,798 WARN [Thread-1004 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741888_1071 2024-11-23T06:37:10,798 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-120231856_22 at /127.0.0.1:57698 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57698 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:10,799 WARN [Thread-1004 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45047,DS-0b08c883-3263-451a-944d-6e835950ad14,DISK] 2024-11-23T06:37:10,803 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:10,803 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:10,803 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:10,803 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:10,803 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:10,804 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343800546 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343830790 2024-11-23T06:37:10,804 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:10,804 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:10,804 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343800546 2024-11-23T06:37:10,805 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35227:35227),(127.0.0.1/127.0.0.1:44083:44083)] 2024-11-23T06:37:10,805 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343800546 is not closed yet, will try archiving it next time 2024-11-23T06:37:10,805 WARN [IPC Server handler 0 on default port 41685 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343800546 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-11-23T06:37:10,805 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343800546 after 1ms 2024-11-23T06:37:11,876 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:12,046 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:13,876 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:14,047 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:14,807 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343800546 after 4003ms 2024-11-23T06:37:15,876 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:16,047 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:16,623 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3d613d76 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:41079,null,null]) java.net.ConnectException: Call From df2f15951535/172.17.0.3 to localhost:35317 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-23T06:37:16,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741833_1019 (size=455) 2024-11-23T06:37:17,013 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343801031 to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs/df2f15951535%2C41671%2C1732343800399.1732343801031 2024-11-23T06:37:17,014 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343822004 to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs/df2f15951535%2C41671%2C1732343800399.1732343822004 2024-11-23T06:37:17,877 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:18,048 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:18,607 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@c96863c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41755, datanodeUuid=d5007248-62f8-432a-8a26-b16b6214d840, infoPort=35227, infoSecurePort=0, ipcPort=37517, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741833_1019 to 127.0.0.1:45047 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:19,878 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:19,926 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41671%2C1732343800399.1732343839925 2024-11-23T06:37:19,932 WARN [Thread-1015 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:19,933 WARN [Thread-1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK], DatanodeInfoWithStorage[127.0.0.1:41755,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 
2024-11-23T06:37:19,933 WARN [Thread-1015 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741890_1074 2024-11-23T06:37:19,934 WARN [Thread-1015 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:19,941 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:19,941 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:19,941 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:19,941 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:19,941 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:19,941 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343824026 with entries=12, filesize=11.46 KB; new WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343839925 2024-11-23T06:37:19,942 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44083:44083),(127.0.0.1/127.0.0.1:35227:35227)] 2024-11-23T06:37:19,942 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343824026 is not closed yet, will try archiving it next time 2024-11-23T06:37:19,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741877_1060 (size=11743) 2024-11-23T06:37:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41671 {}] regionserver.HRegion(8855): Flush requested on 506342cf61e627db185b553c0e4ba305 2024-11-23T06:37:19,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 506342cf61e627db185b553c0e4ba305 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-23T06:37:19,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/df983bdbcee7468b905809494c3cdaed is 1080, key is row0013/info:/1732343839944/Put/seqid=0 2024-11-23T06:37:19,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741892_1076 (size=9267) 2024-11-23T06:37:19,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741892_1076 (size=9267) 2024-11-23T06:37:19,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/df983bdbcee7468b905809494c3cdaed 2024-11-23T06:37:19,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/df983bdbcee7468b905809494c3cdaed as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/df983bdbcee7468b905809494c3cdaed 2024-11-23T06:37:19,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/df983bdbcee7468b905809494c3cdaed, entries=4, sequenceid=66, filesize=9.0 K 2024-11-23T06:37:19,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for 506342cf61e627db185b553c0e4ba305 in 31ms, sequenceid=66, compaction requested=false 2024-11-23T06:37:19,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 506342cf61e627db185b553c0e4ba305: 2024-11-23T06:37:19,980 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-11-23T06:37:19,980 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:37:19,981 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/812db2414c8243a4a7b8da018bcc8abf because midkey is the same as first or last row 2024-11-23T06:37:20,049 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:20,049 INFO [regionserver/df2f15951535:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-23T06:37:20,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T06:37:20,170 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T06:37:20,170 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:37:20,170 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:20,170 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:20,170 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T06:37:20,171 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T06:37:20,171 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2025209955, stopped=false 2024-11-23T06:37:20,171 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=df2f15951535,35247,1732343800220 2024-11-23T06:37:20,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:37:20,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44895-0x10166689d340002, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:37:20,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44895-0x10166689d340002, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:20,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:20,247 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:37:20,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:37:20,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:20,248 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T06:37:20,249 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:37:20,249 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44895-0x10166689d340002, quorum=127.0.0.1:62386, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:37:20,249 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:20,250 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:37:20,250 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'df2f15951535,41671,1732343800399' ***** 2024-11-23T06:37:20,250 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:37:20,250 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T06:37:20,250 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'df2f15951535,44895,1732343801767' ***** 2024-11-23T06:37:20,251 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T06:37:20,251 INFO [RS:1;df2f15951535:44895 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T06:37:20,251 INFO [RS:0;df2f15951535:41671 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T06:37:20,251 INFO [RS:1;df2f15951535:44895 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T06:37:20,251 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T06:37:20,251 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T06:37:20,251 INFO [RS:1;df2f15951535:44895 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T06:37:20,251 INFO [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(959): stopping server df2f15951535,44895,1732343801767 2024-11-23T06:37:20,252 INFO [RS:1;df2f15951535:44895 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:37:20,252 INFO [RS:1;df2f15951535:44895 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;df2f15951535:44895. 
2024-11-23T06:37:20,252 DEBUG [RS:1;df2f15951535:44895 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:37:20,252 DEBUG [RS:1;df2f15951535:44895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:20,252 INFO [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(976): stopping server df2f15951535,44895,1732343801767; all regions closed. 2024-11-23T06:37:20,252 INFO [RS:0;df2f15951535:41671 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T06:37:20,252 INFO [RS:0;df2f15951535:41671 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T06:37:20,252 INFO [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(3091): Received CLOSE for 506342cf61e627db185b553c0e4ba305 2024-11-23T06:37:20,252 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,253 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,253 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,253 INFO [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(959): stopping server df2f15951535,41671,1732343800399 2024-11-23T06:37:20,253 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,253 INFO [RS:0;df2f15951535:41671 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:37:20,253 INFO [RS:0;df2f15951535:41671 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;df2f15951535:41671. 
2024-11-23T06:37:20,253 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,253 DEBUG [RS:0;df2f15951535:41671 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:37:20,253 DEBUG [RS:0;df2f15951535:41671 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:20,253 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 506342cf61e627db185b553c0e4ba305, disabling compactions & flushes 2024-11-23T06:37:20,253 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:37:20,253 INFO [RS:0;df2f15951535:41671 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T06:37:20,253 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:37:20,253 INFO [RS:0;df2f15951535:41671 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T06:37:20,253 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. after waiting 0 ms 2024-11-23T06:37:20,253 INFO [RS:0;df2f15951535:41671 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T06:37:20,253 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 
2024-11-23T06:37:20,254 INFO [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T06:37:20,254 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 506342cf61e627db185b553c0e4ba305 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-23T06:37:20,254 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:20,254 INFO [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T06:37:20,254 DEBUG [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(1325): Online Regions={506342cf61e627db185b553c0e4ba305=TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305., 1588230740=hbase:meta,,1.1588230740} 2024-11-23T06:37:20,254 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:20,254 DEBUG [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 506342cf61e627db185b553c0e4ba305 2024-11-23T06:37:20,254 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:37:20,254 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 2024-11-23T06:37:20,254 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:37:20,254 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:37:20,254 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:37:20,254 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:37:20,254 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-23T06:37:20,255 WARN [IPC Server handler 3 on default port 41685 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 has not been closed. Lease recovery is in progress. RecoveryId = 1077 for block blk_1073741837_1013 2024-11-23T06:37:20,255 ERROR [FSHLog-0-hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8-prefix:df2f15951535,41671,1732343800399.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:20,255 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 after 1ms 2024-11-23T06:37:20,255 WARN [FSHLog-0-hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8-prefix:df2f15951535,41671,1732343800399.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:20,255 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C41671%2C1732343800399.meta:.meta(num 1732343801471) roll requested 2024-11-23T06:37:20,255 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41671%2C1732343800399.meta.1732343840255.meta 2024-11-23T06:37:20,258 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/e10d05c6f50b4a72be2786d2f4f22dce is 1080, key is row0016/info:/1732343839950/Put/seqid=0 2024-11-23T06:37:20,258 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:20,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:34468 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741893_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8]'}, localName='127.0.0.1:43327', datanodeUuid='12a96f33-653c-43c5-a2f2-60cea5c5fb4b', xmitsInProgress=0}:Exception transferring block BP-1232200571-172.17.0.3-1732343797832:blk_1073741893_1078 to mirror 127.0.0.1:33139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:20,258 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741893_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK], DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 2024-11-23T06:37:20,258 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:34468 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741893_1078] {}] datanode.BlockReceiver(316): Block 1073741893 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T06:37:20,258 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741893_1078 2024-11-23T06:37:20,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1875709587_22 at /127.0.0.1:34468 [Receiving block BP-1232200571-172.17.0.3-1732343797832:blk_1073741893_1078] {}] datanode.DataXceiver(331): 127.0.0.1:43327:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34468 dst: /127.0.0.1:43327 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:20,259 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:20,260 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:20,260 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 
2024-11-23T06:37:20,260 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741894_1079 2024-11-23T06:37:20,260 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:20,263 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,263 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,263 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,263 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,263 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,263 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343840255.meta 2024-11-23T06:37:20,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741896_1081 (size=13583) 2024-11-23T06:37:20,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741896_1081 (size=13583) 2024-11-23T06:37:20,266 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:20,266 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41079,DS-cc7069ca-18eb-4652-af29-2bfa06d2c5a7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:20,266 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta 2024-11-23T06:37:20,266 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/e10d05c6f50b4a72be2786d2f4f22dce 2024-11-23T06:37:20,266 WARN [IPC Server handler 3 on default port 41685 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741834_1010 2024-11-23T06:37:20,267 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta after 1ms 2024-11-23T06:37:20,268 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44083:44083),(127.0.0.1/127.0.0.1:35227:35227)] 2024-11-23T06:37:20,268 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta is not closed yet, will try archiving it next time 2024-11-23T06:37:20,273 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/.tmp/info/e10d05c6f50b4a72be2786d2f4f22dce as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/e10d05c6f50b4a72be2786d2f4f22dce 2024-11-23T06:37:20,280 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/e10d05c6f50b4a72be2786d2f4f22dce, entries=8, sequenceid=77, filesize=13.3 K 2024-11-23T06:37:20,281 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 506342cf61e627db185b553c0e4ba305 in 27ms, sequenceid=77, compaction requested=true 2024-11-23T06:37:20,282 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/62b4817e431547e48ad0e6cb4f3f6c85, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/0eeba0f14de24ff3aff591284767410e, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/025569dbc4884120a063b63cf4e510f4, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/acfc69ecd8cc46f59de2ba71c5deffcc, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/e11ea104418c463c82267df52c740759, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/6158fd01e7284c07bbb97fd9521a227e] to archive 2024-11-23T06:37:20,283 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T06:37:20,285 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/62b4817e431547e48ad0e6cb4f3f6c85 to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/62b4817e431547e48ad0e6cb4f3f6c85 2024-11-23T06:37:20,287 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/0eeba0f14de24ff3aff591284767410e to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/0eeba0f14de24ff3aff591284767410e 2024-11-23T06:37:20,287 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/.tmp/info/28f22332aa9343ed9d5d836b559dbb68 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305./info:regioninfo/1732343802280/Put/seqid=0 2024-11-23T06:37:20,288 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/025569dbc4884120a063b63cf4e510f4 to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/025569dbc4884120a063b63cf4e510f4 2024-11-23T06:37:20,289 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:20,289 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK], DatanodeInfoWithStorage[127.0.0.1:43327,DS-779ef5ea-cbea-46cf-83e8-fc3dcc55b461,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 
2024-11-23T06:37:20,289 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741897_1083 2024-11-23T06:37:20,290 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:20,290 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/acfc69ecd8cc46f59de2ba71c5deffcc to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/acfc69ecd8cc46f59de2ba71c5deffcc 2024-11-23T06:37:20,292 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/e11ea104418c463c82267df52c740759 to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/e11ea104418c463c82267df52c740759 2024-11-23T06:37:20,293 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/6158fd01e7284c07bbb97fd9521a227e to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/info/6158fd01e7284c07bbb97fd9521a227e 2024-11-23T06:37:20,293 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=df2f15951535:35247 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-23T06:37:20,294 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [62b4817e431547e48ad0e6cb4f3f6c85=10347, 0eeba0f14de24ff3aff591284767410e=12506, 025569dbc4884120a063b63cf4e510f4=17994, acfc69ecd8cc46f59de2ba71c5deffcc=6027, e11ea104418c463c82267df52c740759=6027, 6158fd01e7284c07bbb97fd9521a227e=6027] 2024-11-23T06:37:20,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741898_1084 (size=7089) 2024-11-23T06:37:20,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741898_1084 (size=7089) 2024-11-23T06:37:20,296 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/.tmp/info/28f22332aa9343ed9d5d836b559dbb68 2024-11-23T06:37:20,298 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/506342cf61e627db185b553c0e4ba305/recovered.edits/80.seqid, newMaxSeqId=80, maxSeqId=1 2024-11-23T06:37:20,299 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:37:20,299 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 506342cf61e627db185b553c0e4ba305: Waiting for close lock at 1732343840253Running coprocessor pre-close hooks at 1732343840253Disabling compacts and flushes for region at 1732343840253Disabling writes for close at 1732343840253Obtaining lock to block concurrent updates at 1732343840254 (+1 ms)Preparing flush snapshotting stores in 506342cf61e627db185b553c0e4ba305 at 1732343840254Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305., syncing WAL and waiting on mvcc, flushsize=dataSize=8607, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1732343840254Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 
at 1732343840255 (+1 ms)Flushing 506342cf61e627db185b553c0e4ba305/info: creating writer at 1732343840255Flushing 506342cf61e627db185b553c0e4ba305/info: appending metadata at 1732343840258 (+3 ms)Flushing 506342cf61e627db185b553c0e4ba305/info: closing flushed file at 1732343840258Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d4041a5: reopening flushed file at 1732343840272 (+14 ms)Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 506342cf61e627db185b553c0e4ba305 in 27ms, sequenceid=77, compaction requested=true at 1732343840281 (+9 ms)Writing region close event to WAL at 1732343840294 (+13 ms)Running coprocessor post-close hooks at 1732343840298 (+4 ms)Closed at 1732343840299 (+1 ms) 2024-11-23T06:37:20,299 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732343801902.506342cf61e627db185b553c0e4ba305. 2024-11-23T06:37:20,319 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/.tmp/ns/bf17eb4517df49ed9c3789eb9915bf1e is 43, key is default/ns:d/1732343801675/Put/seqid=0 2024-11-23T06:37:20,321 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:20,321 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1232200571-172.17.0.3-1732343797832:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK], DatanodeInfoWithStorage[127.0.0.1:41755,DS-daaa1ba3-b4ea-419c-944d-1e445aba9a79,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK]) is bad. 
2024-11-23T06:37:20,321 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-1232200571-172.17.0.3-1732343797832:blk_1073741899_1085 2024-11-23T06:37:20,322 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33139,DS-abdbfe18-acbf-4b98-b5fc-aa81b5bab4b2,DISK] 2024-11-23T06:37:20,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741900_1086 (size=5153) 2024-11-23T06:37:20,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741900_1086 (size=5153) 2024-11-23T06:37:20,333 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/.tmp/ns/bf17eb4517df49ed9c3789eb9915bf1e 2024-11-23T06:37:20,344 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.1732343824026 to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs/df2f15951535%2C41671%2C1732343800399.1732343824026 2024-11-23T06:37:20,354 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/.tmp/table/159695ef4e8a4bf7881144cfa0ae66d1 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732343802292/Put/seqid=0 2024-11-23T06:37:20,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741901_1087 (size=5424) 2024-11-23T06:37:20,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741901_1087 (size=5424) 2024-11-23T06:37:20,359 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/.tmp/table/159695ef4e8a4bf7881144cfa0ae66d1 2024-11-23T06:37:20,365 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/.tmp/info/28f22332aa9343ed9d5d836b559dbb68 as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/info/28f22332aa9343ed9d5d836b559dbb68 2024-11-23T06:37:20,372 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/info/28f22332aa9343ed9d5d836b559dbb68, entries=10, sequenceid=11, filesize=6.9 K 2024-11-23T06:37:20,373 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/.tmp/ns/bf17eb4517df49ed9c3789eb9915bf1e as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/ns/bf17eb4517df49ed9c3789eb9915bf1e 2024-11-23T06:37:20,378 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/ns/bf17eb4517df49ed9c3789eb9915bf1e, entries=2, sequenceid=11, filesize=5.0 K 2024-11-23T06:37:20,380 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/.tmp/table/159695ef4e8a4bf7881144cfa0ae66d1 as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/table/159695ef4e8a4bf7881144cfa0ae66d1 2024-11-23T06:37:20,385 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/table/159695ef4e8a4bf7881144cfa0ae66d1, entries=2, sequenceid=11, filesize=5.3 K 2024-11-23T06:37:20,386 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false 2024-11-23T06:37:20,391 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-23T06:37:20,392 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:37:20,392 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:37:20,392 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343840254Running coprocessor pre-close hooks at 1732343840254Disabling compacts and flushes for region at 1732343840254Disabling writes for close at 1732343840254Obtaining lock to block concurrent updates at 1732343840254Preparing flush snapshotting stores in 1588230740 at 1732343840254Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732343840255 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732343840269 (+14 ms)Flushing 1588230740/info: creating writer at 1732343840269Flushing 1588230740/info: appending metadata at 1732343840287 (+18 ms)Flushing 1588230740/info: closing flushed file at 1732343840287Flushing 1588230740/ns: creating writer at 1732343840302 (+15 ms)Flushing 1588230740/ns: appending metadata at 1732343840319 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732343840319Flushing 
1588230740/table: creating writer at 1732343840339 (+20 ms)Flushing 1588230740/table: appending metadata at 1732343840353 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732343840353Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24f7eb09: reopening flushed file at 1732343840364 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c978355: reopening flushed file at 1732343840372 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@311908ba: reopening flushed file at 1732343840379 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false at 1732343840386 (+7 ms)Writing region close event to WAL at 1732343840388 (+2 ms)Running coprocessor post-close hooks at 1732343840392 (+4 ms)Closed at 1732343840392 2024-11-23T06:37:20,392 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T06:37:20,454 INFO [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(976): stopping server df2f15951535,41671,1732343800399; all regions closed. 2024-11-23T06:37:20,455 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,455 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,455 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,455 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,455 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:20,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741895_1080 (size=825) 2024-11-23T06:37:20,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741895_1080 (size=825) 2024-11-23T06:37:20,871 INFO [regionserver/df2f15951535:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T06:37:20,872 INFO [regionserver/df2f15951535:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T06:37:20,897 INFO [regionserver/df2f15951535:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:37:20,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-23T06:37:20,908 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:37:20,908 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T06:37:20,939 INFO [regionserver/df2f15951535:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T06:37:20,939 INFO [regionserver/df2f15951535:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T06:37:21,606 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@c96863c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41755, datanodeUuid=d5007248-62f8-432a-8a26-b16b6214d840, 
infoPort=35227, infoSecurePort=0, ipcPort=37517, storageInfo=lv=-57;cid=testClusterID;nsid=1815771588;c=1732343797832):Failed to transfer BP-1232200571-172.17.0.3-1732343797832:blk_1073741835_1011 to 127.0.0.1:33139 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:21,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:37:21,797 INFO [master/df2f15951535:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T06:37:21,797 INFO [master/df2f15951535:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-23T06:37:21,873 INFO [regionserver/df2f15951535:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:37:22,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:37:22,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:37:23,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741877_1060 (size=11743) 2024-11-23T06:37:24,257 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 after 4003ms 2024-11-23T06:37:24,269 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta after 4002ms 2024-11-23T06:37:24,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:37:24,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:37:25,254 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-23T06:37:25,260 DEBUG [RS:1;df2f15951535:44895 {}] 
wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs 2024-11-23T06:37:25,260 INFO [RS:1;df2f15951535:44895 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C44895%2C1732343801767:(num 1732343802004) 2024-11-23T06:37:25,260 DEBUG [RS:1;df2f15951535:44895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:25,261 INFO [RS:1;df2f15951535:44895 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:37:25,261 INFO [RS:1;df2f15951535:44895 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:37:25,261 INFO [RS:1;df2f15951535:44895 {}] hbase.ChoreService(370): Chore service for: regionserver/df2f15951535:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T06:37:25,262 INFO [RS:1;df2f15951535:44895 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T06:37:25,262 INFO [RS:1;df2f15951535:44895 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T06:37:25,262 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T06:37:25,262 INFO [RS:1;df2f15951535:44895 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T06:37:25,262 INFO [RS:1;df2f15951535:44895 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:37:25,263 INFO [RS:1;df2f15951535:44895 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44895 2024-11-23T06:37:25,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44895-0x10166689d340002, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/df2f15951535,44895,1732343801767 2024-11-23T06:37:25,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:37:25,330 INFO [RS:1;df2f15951535:44895 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:37:25,340 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [df2f15951535,44895,1732343801767] 2024-11-23T06:37:25,350 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/df2f15951535,44895,1732343801767 already deleted, retry=false 2024-11-23T06:37:25,350 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; df2f15951535,44895,1732343801767 expired; onlineServers=1 2024-11-23T06:37:25,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44895-0x10166689d340002, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:37:25,440 INFO [RS:1;df2f15951535:44895 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:37:25,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44895-0x10166689d340002, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:37:25,440 INFO [RS:1;df2f15951535:44895 {}] regionserver.HRegionServer(1031): Exiting; stopping=df2f15951535,44895,1732343801767; zookeeper connection closed. 
2024-11-23T06:37:25,441 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@28809216 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@28809216 2024-11-23T06:37:25,456 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-23T06:37:25,460 DEBUG [RS:0;df2f15951535:41671 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs 2024-11-23T06:37:25,460 INFO [RS:0;df2f15951535:41671 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C41671%2C1732343800399.meta:.meta(num 1732343840255) 2024-11-23T06:37:25,461 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:25,461 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:25,461 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:25,461 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:25,462 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:25,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741891_1075 (size=14682) 2024-11-23T06:37:25,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741891_1075 (size=14682) 2024-11-23T06:37:25,467 DEBUG [RS:0;df2f15951535:41671 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs 2024-11-23T06:37:25,467 INFO [RS:0;df2f15951535:41671 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C41671%2C1732343800399:(num 1732343839925) 2024-11-23T06:37:25,467 DEBUG [RS:0;df2f15951535:41671 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:25,467 INFO [RS:0;df2f15951535:41671 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:37:25,467 INFO [RS:0;df2f15951535:41671 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:37:25,467 INFO [RS:0;df2f15951535:41671 {}] hbase.ChoreService(370): Chore service for: regionserver/df2f15951535:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T06:37:25,467 INFO [RS:0;df2f15951535:41671 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:37:25,467 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T06:37:25,467 INFO [RS:0;df2f15951535:41671 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41671 2024-11-23T06:37:25,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:37:25,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/df2f15951535,41671,1732343800399 2024-11-23T06:37:25,498 INFO [RS:0;df2f15951535:41671 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:37:25,508 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [df2f15951535,41671,1732343800399] 2024-11-23T06:37:25,519 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/df2f15951535,41671,1732343800399 already deleted, retry=false 2024-11-23T06:37:25,519 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; df2f15951535,41671,1732343800399 expired; onlineServers=0 2024-11-23T06:37:25,519 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'df2f15951535,35247,1732343800220' ***** 2024-11-23T06:37:25,519 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T06:37:25,519 INFO [M:0;df2f15951535:35247 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:37:25,519 INFO [M:0;df2f15951535:35247 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:37:25,519 DEBUG [M:0;df2f15951535:35247 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T06:37:25,519 DEBUG [M:0;df2f15951535:35247 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T06:37:25,519 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-23T06:37:25,519 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343800788 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343800788,5,FailOnTimeoutGroup] 2024-11-23T06:37:25,519 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343800787 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343800787,5,FailOnTimeoutGroup] 2024-11-23T06:37:25,520 INFO [M:0;df2f15951535:35247 {}] hbase.ChoreService(370): Chore service for: master/df2f15951535:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T06:37:25,520 INFO [M:0;df2f15951535:35247 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:37:25,520 DEBUG [M:0;df2f15951535:35247 {}] master.HMaster(1795): Stopping service threads 2024-11-23T06:37:25,520 INFO [M:0;df2f15951535:35247 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T06:37:25,520 INFO [M:0;df2f15951535:35247 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:37:25,520 INFO [M:0;df2f15951535:35247 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T06:37:25,520 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T06:37:25,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T06:37:25,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:25,529 DEBUG [M:0;df2f15951535:35247 {}] zookeeper.ZKUtil(347): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T06:37:25,529 WARN [M:0;df2f15951535:35247 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T06:37:25,530 INFO [M:0;df2f15951535:35247 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/.lastflushedseqids 2024-11-23T06:37:25,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741902_1088 (size=130) 2024-11-23T06:37:25,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741902_1088 (size=130) 2024-11-23T06:37:25,537 INFO [M:0;df2f15951535:35247 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T06:37:25,537 INFO [M:0;df2f15951535:35247 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T06:37:25,537 DEBUG [M:0;df2f15951535:35247 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:37:25,537 INFO [M:0;df2f15951535:35247 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:37:25,537 DEBUG [M:0;df2f15951535:35247 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:37:25,537 DEBUG [M:0;df2f15951535:35247 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:37:25,537 DEBUG [M:0;df2f15951535:35247 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:37:25,538 INFO [M:0;df2f15951535:35247 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-23T06:37:25,557 DEBUG [M:0;df2f15951535:35247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a362c80286e6442e9873fb1b31fa9fdd is 82, key is hbase:meta,,1/info:regioninfo/1732343801534/Put/seqid=0 2024-11-23T06:37:25,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741903_1089 (size=5672) 2024-11-23T06:37:25,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741903_1089 (size=5672) 2024-11-23T06:37:25,562 INFO [M:0;df2f15951535:35247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a362c80286e6442e9873fb1b31fa9fdd 2024-11-23T06:37:25,582 DEBUG [M:0;df2f15951535:35247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6405f2cb961b4151a72179aad549758e is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732343802299/Put/seqid=0 2024-11-23T06:37:25,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741904_1090 (size=6255) 2024-11-23T06:37:25,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741904_1090 (size=6255) 2024-11-23T06:37:25,587 INFO [M:0;df2f15951535:35247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6405f2cb961b4151a72179aad549758e 2024-11-23T06:37:25,592 INFO [M:0;df2f15951535:35247 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6405f2cb961b4151a72179aad549758e 2024-11-23T06:37:25,607 DEBUG [M:0;df2f15951535:35247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0dbf80773c234eac8df8e875d82f0f0b is 69, key is df2f15951535,41671,1732343800399/rs:state/1732343800874/Put/seqid=0 2024-11-23T06:37:25,608 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:37:25,608 INFO [RS:0;df2f15951535:41671 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:37:25,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41671-0x10166689d340001, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:37:25,609 INFO [RS:0;df2f15951535:41671 {}] regionserver.HRegionServer(1031): Exiting; stopping=df2f15951535,41671,1732343800399; zookeeper connection closed. 2024-11-23T06:37:25,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:37:25,609 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@13b46c9f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@13b46c9f 2024-11-23T06:37:25,609 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-23T06:37:25,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741905_1091 (size=5224) 2024-11-23T06:37:25,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741905_1091 (size=5224) 2024-11-23T06:37:25,612 INFO [M:0;df2f15951535:35247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0dbf80773c234eac8df8e875d82f0f0b 2024-11-23T06:37:25,630 DEBUG [M:0;df2f15951535:35247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d63f0a7d7453415abd70d26b166c6469 is 52, key is load_balancer_on/state:d/1732343801748/Put/seqid=0 2024-11-23T06:37:25,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741906_1092 (size=5056) 2024-11-23T06:37:25,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741906_1092 (size=5056) 2024-11-23T06:37:25,635 INFO [M:0;df2f15951535:35247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d63f0a7d7453415abd70d26b166c6469 2024-11-23T06:37:25,641 DEBUG [M:0;df2f15951535:35247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a362c80286e6442e9873fb1b31fa9fdd as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a362c80286e6442e9873fb1b31fa9fdd 2024-11-23T06:37:25,646 INFO 
[M:0;df2f15951535:35247 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a362c80286e6442e9873fb1b31fa9fdd, entries=8, sequenceid=60, filesize=5.5 K 2024-11-23T06:37:25,647 DEBUG [M:0;df2f15951535:35247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6405f2cb961b4151a72179aad549758e as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6405f2cb961b4151a72179aad549758e 2024-11-23T06:37:25,653 INFO [M:0;df2f15951535:35247 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6405f2cb961b4151a72179aad549758e 2024-11-23T06:37:25,653 INFO [M:0;df2f15951535:35247 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6405f2cb961b4151a72179aad549758e, entries=6, sequenceid=60, filesize=6.1 K 2024-11-23T06:37:25,654 DEBUG [M:0;df2f15951535:35247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0dbf80773c234eac8df8e875d82f0f0b as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0dbf80773c234eac8df8e875d82f0f0b 2024-11-23T06:37:25,659 INFO [M:0;df2f15951535:35247 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0dbf80773c234eac8df8e875d82f0f0b, entries=2, sequenceid=60, filesize=5.1 K 2024-11-23T06:37:25,660 DEBUG [M:0;df2f15951535:35247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d63f0a7d7453415abd70d26b166c6469 as hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d63f0a7d7453415abd70d26b166c6469 2024-11-23T06:37:25,665 INFO [M:0;df2f15951535:35247 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d63f0a7d7453415abd70d26b166c6469, entries=1, sequenceid=60, filesize=4.9 K 2024-11-23T06:37:25,666 INFO [M:0;df2f15951535:35247 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=60, compaction requested=false 2024-11-23T06:37:25,667 INFO [M:0;df2f15951535:35247 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T06:37:25,667 DEBUG [M:0;df2f15951535:35247 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343845537Disabling compacts and flushes for region at 1732343845537Disabling writes for close at 1732343845537Obtaining lock to block concurrent updates at 1732343845538 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732343845538Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732343845538Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732343845539 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732343845539Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732343845556 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732343845557 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732343845567 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732343845581 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732343845581Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732343845592 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732343845606 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732343845606Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732343845617 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732343845630 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732343845630Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cb6af3e: reopening flushed file at 1732343845640 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23c3d436: reopening flushed file at 1732343845647 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72287812: reopening flushed file at 1732343845653 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@331b2ae5: reopening flushed file at 1732343845659 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=60, compaction requested=false at 1732343845666 (+7 ms)Writing region close event to WAL at 1732343845667 (+1 ms)Closed at 1732343845667 2024-11-23T06:37:25,667 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:25,668 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:25,668 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:25,668 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:25,668 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:25,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43327 is added to blk_1073741889_1072 (size=1045) 2024-11-23T06:37:25,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41755 is added to blk_1073741889_1072 (size=1045) 2024-11-23T06:37:25,825 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T06:37:25,847 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:25,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:26,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:26,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:26,629 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@116d3d03 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1232200571-172.17.0.3-1732343797832:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:41079,null,null]) java.net.ConnectException: Call From df2f15951535/172.17.0.3 to localhost:35317 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-23T06:37:26,823 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/WALs/df2f15951535,35247,1732343800220/df2f15951535%2C35247%2C1732343800220.1732343800546 to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/oldWALs/df2f15951535%2C35247%2C1732343800220.1732343800546 2024-11-23T06:37:26,832 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/MasterData/oldWALs/df2f15951535%2C35247%2C1732343800220.1732343800546 to hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/oldWALs/df2f15951535%2C35247%2C1732343800220.1732343800546$masterlocalwal$ 2024-11-23T06:37:26,833 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T06:37:26,833 INFO [M:0;df2f15951535:35247 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T06:37:26,833 INFO [M:0;df2f15951535:35247 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35247 2024-11-23T06:37:26,833 INFO [M:0;df2f15951535:35247 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:37:26,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:37:26,988 INFO [M:0;df2f15951535:35247 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:37:26,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35247-0x10166689d340000, quorum=127.0.0.1:62386, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:37:26,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6219e1b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:26,992 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e00c8cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:37:26,992 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:37:26,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@441dcfc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:37:26,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16f8dfc7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,STOPPED} 2024-11-23T06:37:26,994 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:37:26,994 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:37:26,994 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:37:26,994 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1232200571-172.17.0.3-1732343797832 (Datanode Uuid d5007248-62f8-432a-8a26-b16b6214d840) service to localhost/127.0.0.1:41685 2024-11-23T06:37:26,994 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6970d8ec {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1232200571-172.17.0.3-1732343797832:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:41079,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:35317 , LocalHost:localPort df2f15951535/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-23T06:37:26,994 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6970d8ec {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1232200571-172.17.0.3-1732343797832:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41755,null,null], DatanodeInfoWithStorage[127.0.0.1:41079,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1232200571-172.17.0.3-1732343797832 2024-11-23T06:37:26,994 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6970d8ec {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41755,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1232200571-172.17.0.3-1732343797832 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:26,995 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6970d8ec {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41079,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1232200571-172.17.0.3-1732343797832 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:26,995 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6970d8ec {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41755,null,null], DatanodeInfoWithStorage[127.0.0.1:41079,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1232200571-172.17.0.3-1732343797832:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:41755,null,null], DatanodeInfoWithStorage[127.0.0.1:41079,null,null]] 2024-11-23T06:37:26,995 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data3/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:26,995 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data4/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:26,996 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:37:26,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@66046020{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:26,999 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64b86931{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:37:26,999 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:37:26,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ccc1bc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:37:27,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@5bea65f7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,STOPPED} 2024-11-23T06:37:27,001 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:37:27,001 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T06:37:27,001 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:37:27,001 WARN [BP-1232200571-172.17.0.3-1732343797832 heartbeating to localhost/127.0.0.1:41685 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1232200571-172.17.0.3-1732343797832 (Datanode Uuid 12a96f33-653c-43c5-a2f2-60cea5c5fb4b) service to localhost/127.0.0.1:41685 2024-11-23T06:37:27,002 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data7/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:27,002 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/cluster_b44d69c9-8ecf-9538-19df-558119020f10/data/data8/current/BP-1232200571-172.17.0.3-1732343797832 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:27,002 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:37:27,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2295376c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:37:27,008 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54adbc26{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:37:27,008 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:37:27,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ac253d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:37:27,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ab5393f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir/,STOPPED} 2024-11-23T06:37:27,015 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T06:37:27,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T06:37:27,053 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 79) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41685 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:39029 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:41685 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:41685 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41685 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41685 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41685 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41685 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41685 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41685 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fbc60bf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41685 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39029 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fbc60bf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41685 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=448 (was 404) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=188 (was 287), ProcessCount=11 (was 11), AvailableMemoryMB=7825 (was 8378) 2024-11-23T06:37:27,059 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=188, ProcessCount=11, AvailableMemoryMB=7825 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.log.dir so I do NOT create it in target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b3efa35-bede-51fe-275e-2e1b34bdea26/hadoop.tmp.dir so I do NOT create it in target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006, deleteOnExit=true 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/test.cache.data in system properties and HBase conf 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir in system properties and HBase conf 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T06:37:27,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T06:37:27,060 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/nfs.dump.dir in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/java.io.tmpdir in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:37:27,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T06:37:27,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T06:37:27,073 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:37:27,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:27,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:27,446 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:37:27,450 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:37:27,451 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:37:27,451 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:37:27,451 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:37:27,452 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:37:27,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25a29a07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:37:27,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7096145a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:37:27,547 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49ef22be{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/java.io.tmpdir/jetty-localhost-42623-hadoop-hdfs-3_4_1-tests_jar-_-any-2547799839460284818/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:37:27,547 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b230242{HTTP/1.1, (http/1.1)}{localhost:42623} 2024-11-23T06:37:27,547 INFO [Time-limited test {}] server.Server(415): Started @153544ms 2024-11-23T06:37:27,558 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:37:27,814 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:37:27,818 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:37:27,819 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:37:27,819 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:37:27,819 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:37:27,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fc8bed8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:37:27,820 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13646a74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:37:27,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fb42f9a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/java.io.tmpdir/jetty-localhost-41805-hadoop-hdfs-3_4_1-tests_jar-_-any-8248436145166895953/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:27,913 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d74671d{HTTP/1.1, (http/1.1)}{localhost:41805} 2024-11-23T06:37:27,914 INFO [Time-limited test {}] server.Server(415): Started @153911ms 2024-11-23T06:37:27,915 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:37:27,939 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:37:27,943 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:37:27,944 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:37:27,944 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:37:27,944 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:37:27,944 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64434c96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:37:27,945 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47946b20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:37:28,039 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@37687418{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/java.io.tmpdir/jetty-localhost-44571-hadoop-hdfs-3_4_1-tests_jar-_-any-4714006311536046625/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:28,040 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e7f214b{HTTP/1.1, (http/1.1)}{localhost:44571} 2024-11-23T06:37:28,040 INFO [Time-limited test {}] server.Server(415): Started @154036ms 2024-11-23T06:37:28,041 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:37:28,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:28,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:37:29,122 WARN [Thread-1193 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data1/current/BP-598499779-172.17.0.3-1732343847083/current, will proceed with Du for space computation calculation, 2024-11-23T06:37:29,122 WARN [Thread-1194 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data2/current/BP-598499779-172.17.0.3-1732343847083/current, will proceed with Du for space computation calculation, 2024-11-23T06:37:29,138 WARN [Thread-1157 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:37:29,140 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45cffd71c88c9d9f with lease ID 0x170f246ce1604e5b: Processing first storage report for DS-58ebab4b-6297-4f78-8fa0-772540d0dd04 from datanode DatanodeRegistration(127.0.0.1:34093, datanodeUuid=55900776-64c5-4508-aca2-ae63f3cd1351, infoPort=38253, infoSecurePort=0, ipcPort=34721, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083) 2024-11-23T06:37:29,140 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45cffd71c88c9d9f with lease ID 0x170f246ce1604e5b: from storage DS-58ebab4b-6297-4f78-8fa0-772540d0dd04 node DatanodeRegistration(127.0.0.1:34093, datanodeUuid=55900776-64c5-4508-aca2-ae63f3cd1351, infoPort=38253, infoSecurePort=0, ipcPort=34721, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:29,140 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45cffd71c88c9d9f with lease ID 0x170f246ce1604e5b: Processing first storage report for DS-c6a74855-42f0-440b-821d-59c86375446f from datanode DatanodeRegistration(127.0.0.1:34093, datanodeUuid=55900776-64c5-4508-aca2-ae63f3cd1351, infoPort=38253, infoSecurePort=0, ipcPort=34721, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083) 2024-11-23T06:37:29,140 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45cffd71c88c9d9f with lease ID 0x170f246ce1604e5b: from storage DS-c6a74855-42f0-440b-821d-59c86375446f node DatanodeRegistration(127.0.0.1:34093, datanodeUuid=55900776-64c5-4508-aca2-ae63f3cd1351, infoPort=38253, infoSecurePort=0, ipcPort=34721, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:29,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:29,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:29,329 WARN [Thread-1204 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data3/current/BP-598499779-172.17.0.3-1732343847083/current, will proceed with Du for space computation calculation, 2024-11-23T06:37:29,329 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data4/current/BP-598499779-172.17.0.3-1732343847083/current, will proceed with Du for space computation calculation, 2024-11-23T06:37:29,348 WARN [Thread-1180 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:37:29,350 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x68c9ff2c8266102f with lease ID 0x170f246ce1604e5c: Processing first storage report for DS-45e7fee3-2bea-490f-b165-1f9937c883f3 from datanode DatanodeRegistration(127.0.0.1:43123, datanodeUuid=f576fb4a-11b8-4637-81d6-66ae4c594868, infoPort=39013, infoSecurePort=0, ipcPort=44803, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083) 2024-11-23T06:37:29,350 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x68c9ff2c8266102f with lease ID 0x170f246ce1604e5c: from storage DS-45e7fee3-2bea-490f-b165-1f9937c883f3 node DatanodeRegistration(127.0.0.1:43123, datanodeUuid=f576fb4a-11b8-4637-81d6-66ae4c594868, infoPort=39013, infoSecurePort=0, ipcPort=44803, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:29,350 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x68c9ff2c8266102f with lease ID 0x170f246ce1604e5c: Processing first storage report for DS-891afff4-5430-468d-b3a3-417ac165acc2 from datanode DatanodeRegistration(127.0.0.1:43123, datanodeUuid=f576fb4a-11b8-4637-81d6-66ae4c594868, infoPort=39013, infoSecurePort=0, ipcPort=44803, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083) 2024-11-23T06:37:29,350 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x68c9ff2c8266102f with lease ID 0x170f246ce1604e5c: from storage DS-891afff4-5430-468d-b3a3-417ac165acc2 node DatanodeRegistration(127.0.0.1:43123, datanodeUuid=f576fb4a-11b8-4637-81d6-66ae4c594868, infoPort=39013, infoSecurePort=0, ipcPort=44803, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:29,376 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9 2024-11-23T06:37:29,380 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/zookeeper_0, clientPort=56651, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T06:37:29,381 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56651 2024-11-23T06:37:29,381 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:37:29,383 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:37:29,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34093 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:37:29,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43123 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:37:29,396 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca with version=8 2024-11-23T06:37:29,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/hbase-staging 2024-11-23T06:37:29,399 INFO [Time-limited test {}] client.ConnectionUtils(128): master/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:37:29,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:37:29,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:37:29,400 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:37:29,400 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:37:29,400 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:37:29,400 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T06:37:29,400 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:37:29,401 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42835 2024-11-23T06:37:29,402 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42835 connecting to ZooKeeper ensemble=127.0.0.1:56651 2024-11-23T06:37:29,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:428350x0, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:37:29,458 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42835-0x10166695d4e0000 connected 2024-11-23T06:37:29,553 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:37:29,555 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:37:29,558 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:37:29,558 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca, hbase.cluster.distributed=false 2024-11-23T06:37:29,561 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:37:29,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42835 2024-11-23T06:37:29,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42835 2024-11-23T06:37:29,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42835 2024-11-23T06:37:29,563 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42835 2024-11-23T06:37:29,563 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42835 2024-11-23T06:37:29,581 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:37:29,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:37:29,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:37:29,581 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:37:29,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:37:29,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:37:29,581 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T06:37:29,582 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:37:29,582 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41201 2024-11-23T06:37:29,583 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41201 connecting to ZooKeeper ensemble=127.0.0.1:56651 2024-11-23T06:37:29,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:37:29,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:37:29,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412010x0, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:37:29,596 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:412010x0, quorum=127.0.0.1:56651, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:37:29,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41201-0x10166695d4e0001 connected 2024-11-23T06:37:29,596 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T06:37:29,597 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T06:37:29,597 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T06:37:29,598 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:37:29,599 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41201 2024-11-23T06:37:29,599 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41201 2024-11-23T06:37:29,599 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41201 2024-11-23T06:37:29,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41201 2024-11-23T06:37:29,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41201 2024-11-23T06:37:29,614 DEBUG [M:0;df2f15951535:42835 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;df2f15951535:42835 2024-11-23T06:37:29,614 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/df2f15951535,42835,1732343849399 2024-11-23T06:37:29,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:37:29,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:37:29,627 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/df2f15951535,42835,1732343849399 2024-11-23T06:37:29,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T06:37:29,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:29,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:29,638 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T06:37:29,639 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/df2f15951535,42835,1732343849399 from backup master directory 2024-11-23T06:37:29,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/df2f15951535,42835,1732343849399 2024-11-23T06:37:29,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:37:29,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:37:29,648 WARN [master/df2f15951535:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T06:37:29,648 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=df2f15951535,42835,1732343849399 2024-11-23T06:37:29,656 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/hbase.id] with ID: f8ad3679-9010-43c7-9f83-f3c1fd1d33c9 2024-11-23T06:37:29,657 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/.tmp/hbase.id 2024-11-23T06:37:29,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34093 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:37:29,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43123 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:37:29,665 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/.tmp/hbase.id]:[hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/hbase.id] 2024-11-23T06:37:29,680 INFO [master/df2f15951535:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:37:29,681 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T06:37:29,682 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
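The entries up to this point trace the minicluster bootstrap for testLogRollOnPipelineRestart: a fresh mini HDFS with two datanodes, a single-node ZooKeeper ensemble on clientPort=56651, and an HMaster that registers itself under /hbase/backup-masters before promoting itself to active master. As a rough sketch only (not the actual test source), the following shows how a JUnit test typically drives the same topology through the HBaseTestingUtil and StartMiniClusterOption classes named in these entries; the class name MiniClusterSketch and the table and family names are placeholders, and the helper methods are assumed to match the HBaseTestingUtil API this branch uses.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Same topology the log reports: 1 master, 1 region server, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts mini DFS, ZooKeeper, master and region server
    try {
      // Placeholder table; a WAL-rolling test would write here, then restart the DFS pipeline.
      Table table = util.createTable(TableName.valueOf("testLogRoll"), Bytes.toBytes("info"));
      table.close();
    } finally {
      util.shutdownMiniCluster();    // tears the cluster down; deleteOnExit test dirs are removed
    }
  }
}
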
2024-11-23T06:37:29,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:29,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:29,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34093 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:37:29,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43123 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:37:29,707 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:37:29,708 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T06:37:29,708 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:37:29,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43123 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:37:29,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34093 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:37:29,715 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store 2024-11-23T06:37:29,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34093 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:37:29,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43123 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:37:29,722 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:37:29,722 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:37:29,722 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:37:29,722 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:37:29,722 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:37:29,722 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:37:29,722 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
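The 'master:store' descriptor logged above is assembled internally during the master's local-region bootstrap, but the same column-family settings can be restated with the public descriptor builder API. Below is a minimal sketch for the 'info' family only, assuming the standard HBase client builders; the class and method names are illustrative and are not the code path that produced this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    // Mirrors the logged 'info' family: VERSIONS=3, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, in-memory, 8 KB block size.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // Namespace 'master', qualifier 'store': the region the active master uses for
    // its own bookkeeping (the proc, rs and state families are omitted in this sketch).
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
  }
}
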
2024-11-23T06:37:29,723 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343849722Disabling compacts and flushes for region at 1732343849722Disabling writes for close at 1732343849722Writing region close event to WAL at 1732343849722Closed at 1732343849722 2024-11-23T06:37:29,723 WARN [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/.initializing 2024-11-23T06:37:29,723 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399 2024-11-23T06:37:29,726 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C42835%2C1732343849399, suffix=, logDir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399, archiveDir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/oldWALs, maxLogs=10 2024-11-23T06:37:29,726 INFO [master/df2f15951535:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C42835%2C1732343849399.1732343849726 2024-11-23T06:37:29,731 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399/df2f15951535%2C42835%2C1732343849399.1732343849726 2024-11-23T06:37:29,736 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39013:39013),(127.0.0.1/127.0.0.1:38253:38253)] 2024-11-23T06:37:29,739 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:37:29,739 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:37:29,739 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,739 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,742 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T06:37:29,742 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:29,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:37:29,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T06:37:29,744 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:29,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:37:29,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T06:37:29,745 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:29,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:37:29,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T06:37:29,747 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:29,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:37:29,748 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,748 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,749 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,750 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,750 DEBUG [master/df2f15951535:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,750 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T06:37:29,751 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:37:29,755 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:37:29,755 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841436, jitterRate=0.06994225084781647}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T06:37:29,756 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732343849739Initializing all the Stores at 1732343849740 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343849740Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343849741 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343849741Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343849741Cleaning up temporary data from old regions at 1732343849750 (+9 ms)Region opened successfully at 1732343849756 (+6 ms) 2024-11-23T06:37:29,757 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T06:37:29,760 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3335a4ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:37:29,761 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T06:37:29,761 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T06:37:29,761 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T06:37:29,762 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T06:37:29,762 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T06:37:29,763 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T06:37:29,763 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T06:37:29,765 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T06:37:29,766 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T06:37:29,774 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T06:37:29,774 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T06:37:29,775 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T06:37:29,784 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T06:37:29,785 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T06:37:29,786 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T06:37:29,795 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T06:37:29,796 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T06:37:29,806 DEBUG 
[master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T06:37:29,808 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T06:37:29,816 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T06:37:29,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:37:29,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:37:29,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:29,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:29,827 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=df2f15951535,42835,1732343849399, sessionid=0x10166695d4e0000, setting cluster-up flag (Was=false) 2024-11-23T06:37:29,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:29,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:29,879 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T06:37:29,881 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,42835,1732343849399 2024-11-23T06:37:29,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:29,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:29,932 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T06:37:29,933 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,42835,1732343849399 2024-11-23T06:37:29,936 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T06:37:29,938 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T06:37:29,938 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T06:37:29,938 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T06:37:29,939 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: df2f15951535,42835,1732343849399 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T06:37:29,940 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:37:29,940 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:37:29,940 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:37:29,940 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:37:29,940 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/df2f15951535:0, corePoolSize=10, maxPoolSize=10 2024-11-23T06:37:29,941 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:29,941 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:37:29,941 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/df2f15951535:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T06:37:29,942 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732343879941 2024-11-23T06:37:29,942 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T06:37:29,942 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T06:37:29,942 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T06:37:29,942 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T06:37:29,942 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T06:37:29,942 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T06:37:29,942 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:29,943 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:37:29,943 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T06:37:29,943 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T06:37:29,943 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T06:37:29,943 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T06:37:29,943 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T06:37:29,943 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T06:37:29,943 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343849943,5,FailOnTimeoutGroup] 2024-11-23T06:37:29,943 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343849943,5,FailOnTimeoutGroup] 2024-11-23T06:37:29,944 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:29,944 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T06:37:29,944 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:29,944 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:29,944 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:29,944 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T06:37:29,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43123 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:37:29,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34093 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:37:29,960 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T06:37:29,960 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca 2024-11-23T06:37:29,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43123 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:37:29,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34093 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:37:30,002 INFO [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(746): ClusterId : f8ad3679-9010-43c7-9f83-f3c1fd1d33c9 2024-11-23T06:37:30,002 DEBUG [RS:0;df2f15951535:41201 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T06:37:30,013 DEBUG [RS:0;df2f15951535:41201 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T06:37:30,013 DEBUG [RS:0;df2f15951535:41201 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T06:37:30,023 DEBUG [RS:0;df2f15951535:41201 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T06:37:30,024 DEBUG [RS:0;df2f15951535:41201 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@651d1ee6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:37:30,034 DEBUG [RS:0;df2f15951535:41201 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;df2f15951535:41201 2024-11-23T06:37:30,034 INFO [RS:0;df2f15951535:41201 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T06:37:30,034 INFO [RS:0;df2f15951535:41201 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T06:37:30,034 DEBUG [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T06:37:30,035 INFO [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(2659): reportForDuty to master=df2f15951535,42835,1732343849399 with port=41201, startcode=1732343849581 2024-11-23T06:37:30,035 DEBUG [RS:0;df2f15951535:41201 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T06:37:30,038 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44563, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T06:37:30,038 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42835 {}] master.ServerManager(363): Checking decommissioned status of RegionServer df2f15951535,41201,1732343849581 2024-11-23T06:37:30,038 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42835 {}] master.ServerManager(517): Registering regionserver=df2f15951535,41201,1732343849581 2024-11-23T06:37:30,040 DEBUG [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca 2024-11-23T06:37:30,040 DEBUG [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38355 2024-11-23T06:37:30,040 DEBUG [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T06:37:30,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:37:30,048 DEBUG [RS:0;df2f15951535:41201 {}] zookeeper.ZKUtil(111): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/df2f15951535,41201,1732343849581 2024-11-23T06:37:30,048 WARN [RS:0;df2f15951535:41201 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T06:37:30,048 INFO [RS:0;df2f15951535:41201 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:37:30,048 DEBUG [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581 2024-11-23T06:37:30,049 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [df2f15951535,41201,1732343849581] 2024-11-23T06:37:30,052 INFO [RS:0;df2f15951535:41201 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T06:37:30,053 INFO [RS:0;df2f15951535:41201 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T06:37:30,054 INFO [RS:0;df2f15951535:41201 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T06:37:30,054 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T06:37:30,054 INFO [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T06:37:30,055 INFO [RS:0;df2f15951535:41201 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T06:37:30,055 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,055 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:37:30,056 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:37:30,057 DEBUG [RS:0;df2f15951535:41201 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:37:30,060 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-23T06:37:30,060 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,060 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,060 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,060 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,060 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41201,1732343849581-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:37:30,075 INFO [RS:0;df2f15951535:41201 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T06:37:30,075 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41201,1732343849581-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,075 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,075 INFO [RS:0;df2f15951535:41201 {}] regionserver.Replication(171): df2f15951535,41201,1732343849581 started 2024-11-23T06:37:30,090 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,090 INFO [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(1482): Serving as df2f15951535,41201,1732343849581, RpcServer on df2f15951535/172.17.0.3:41201, sessionid=0x10166695d4e0001 2024-11-23T06:37:30,090 DEBUG [RS:0;df2f15951535:41201 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T06:37:30,090 DEBUG [RS:0;df2f15951535:41201 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager df2f15951535,41201,1732343849581 2024-11-23T06:37:30,090 DEBUG [RS:0;df2f15951535:41201 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,41201,1732343849581' 2024-11-23T06:37:30,090 DEBUG [RS:0;df2f15951535:41201 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T06:37:30,090 DEBUG [RS:0;df2f15951535:41201 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T06:37:30,091 DEBUG [RS:0;df2f15951535:41201 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T06:37:30,091 DEBUG [RS:0;df2f15951535:41201 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T06:37:30,091 DEBUG [RS:0;df2f15951535:41201 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager df2f15951535,41201,1732343849581 2024-11-23T06:37:30,091 DEBUG [RS:0;df2f15951535:41201 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,41201,1732343849581' 2024-11-23T06:37:30,091 DEBUG [RS:0;df2f15951535:41201 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T06:37:30,091 DEBUG 
[RS:0;df2f15951535:41201 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T06:37:30,092 DEBUG [RS:0;df2f15951535:41201 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T06:37:30,092 INFO [RS:0;df2f15951535:41201 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T06:37:30,092 INFO [RS:0;df2f15951535:41201 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T06:37:30,194 INFO [RS:0;df2f15951535:41201 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C41201%2C1732343849581, suffix=, logDir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581, archiveDir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/oldWALs, maxLogs=32 2024-11-23T06:37:30,195 INFO [RS:0;df2f15951535:41201 {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41201%2C1732343849581.1732343850195 2024-11-23T06:37:30,203 INFO [RS:0;df2f15951535:41201 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 2024-11-23T06:37:30,205 DEBUG [RS:0;df2f15951535:41201 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38253:38253),(127.0.0.1/127.0.0.1:39013:39013)] 2024-11-23T06:37:30,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:30,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:37:30,370 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:37:30,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:37:30,373 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:37:30,373 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:30,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:37:30,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:37:30,376 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:37:30,376 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:30,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:37:30,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:37:30,379 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:37:30,379 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:30,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:37:30,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:37:30,380 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:37:30,380 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:30,381 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:37:30,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:37:30,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740 2024-11-23T06:37:30,382 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740 2024-11-23T06:37:30,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-11-23T06:37:30,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:37:30,383 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T06:37:30,384 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:37:30,386 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:37:30,387 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811803, jitterRate=0.03226099908351898}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:37:30,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732343850370Initializing all the Stores at 1732343850371 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343850371Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343850371Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343850371Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343850371Cleaning up temporary data from old regions at 1732343850383 (+12 ms)Region opened successfully at 1732343850387 (+4 ms) 2024-11-23T06:37:30,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:37:30,387 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:37:30,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:37:30,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:37:30,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-11-23T06:37:30,388 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:37:30,388 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343850387Disabling compacts and flushes for region at 1732343850387Disabling writes for close at 1732343850387Writing region close event to WAL at 1732343850388 (+1 ms)Closed at 1732343850388 2024-11-23T06:37:30,389 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:37:30,389 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T06:37:30,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T06:37:30,390 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:37:30,391 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T06:37:30,542 DEBUG [df2f15951535:42835 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T06:37:30,543 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=df2f15951535,41201,1732343849581 2024-11-23T06:37:30,547 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,41201,1732343849581, state=OPENING 2024-11-23T06:37:30,606 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T06:37:30,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:30,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:30,617 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:37:30,617 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:37:30,617 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:37:30,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; 
OpenRegionProcedure 1588230740, server=df2f15951535,41201,1732343849581}] 2024-11-23T06:37:30,772 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T06:37:30,777 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T06:37:30,783 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T06:37:30,783 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:37:30,785 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C41201%2C1732343849581.meta, suffix=.meta, logDir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581, archiveDir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/oldWALs, maxLogs=32 2024-11-23T06:37:30,786 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta 2024-11-23T06:37:30,793 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta 2024-11-23T06:37:30,799 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38253:38253),(127.0.0.1/127.0.0.1:39013:39013)] 2024-11-23T06:37:30,800 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:37:30,800 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T06:37:30,801 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T06:37:30,801 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
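The WAL configuration reported above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, FSHLogProvider) corresponds to standard HBase configuration keys. A minimal sketch of setting them on a cluster Configuration follows; the key names are the stock ones, but the surrounding harness code is illustrative and not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  // Hedged sketch: mirrors the WAL numbers in the log line above, not the actual test setup.
  public static Configuration walTuning() {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects the classic FSHLog-based provider, as WALFactory reports above.
    conf.set("hbase.wal.provider", "filesystem");
    // WAL block size (256 MB); logs roll at blocksize * multiplier (0.5 -> 128 MB).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on outstanding WAL files before forced flushes (maxLogs=32 above).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}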
2024-11-23T06:37:30,801 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T06:37:30,801 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:37:30,801 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T06:37:30,801 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T06:37:30,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:37:30,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:37:30,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:30,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:37:30,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:37:30,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:37:30,804 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:30,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:37:30,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:37:30,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:37:30,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:30,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:37:30,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:37:30,807 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:37:30,807 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:30,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
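The CompactionConfiguration line repeated above for each column family reflects the default compaction tuning knobs (128 MB min compact size, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0). A hedged sketch of the configuration keys behind those numbers; the keys are standard HBase settings, the wiring into a test is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  // Hedged sketch of the defaults echoed in the CompactionConfiguration log lines.
  public static Configuration compactionDefaults() {
    Configuration conf = HBaseConfiguration.create();
    // minCompactSize: files below this size are always compaction candidates (128 MB above).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    // Bounds on how many store files one minor compaction may select (minFilesToCompact:3, maxFilesToCompact:10).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Ratios used by ExploringCompactionPolicy during file selection (1.2 normal, 5.0 off-peak).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    return conf;
  }
}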
2024-11-23T06:37:30,807 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:37:30,808 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740 2024-11-23T06:37:30,809 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740 2024-11-23T06:37:30,810 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:37:30,810 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:37:30,810 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T06:37:30,811 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:37:30,812 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786518, jitterRate=1.096576452255249E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:37:30,812 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T06:37:30,813 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732343850801Writing region info on filesystem at 1732343850801Initializing all the Stores at 1732343850802 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343850802Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343850802Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343850802Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343850802Cleaning up temporary data from old regions at 1732343850810 (+8 ms)Running coprocessor post-open hooks at 1732343850812 (+2 ms)Region opened successfully at 1732343850813 (+1 ms) 2024-11-23T06:37:30,814 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732343850771 2024-11-23T06:37:30,816 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T06:37:30,816 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T06:37:30,817 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,41201,1732343849581 2024-11-23T06:37:30,818 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,41201,1732343849581, state=OPEN 2024-11-23T06:37:30,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:37:30,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:37:30,859 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=df2f15951535,41201,1732343849581 2024-11-23T06:37:30,859 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:37:30,859 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:37:30,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T06:37:30,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,41201,1732343849581 in 242 msec 2024-11-23T06:37:30,868 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T06:37:30,868 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 476 msec 2024-11-23T06:37:30,869 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:37:30,869 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T06:37:30,871 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:37:30,871 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,41201,1732343849581, seqNum=-1] 2024-11-23T06:37:30,871 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:37:30,872 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49897, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:37:30,878 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 940 msec 2024-11-23T06:37:30,879 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732343850879, completionTime=-1 2024-11-23T06:37:30,879 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T06:37:30,879 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T06:37:30,881 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T06:37:30,881 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732343910881 2024-11-23T06:37:30,881 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732343970881 2024-11-23T06:37:30,881 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-23T06:37:30,881 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,42835,1732343849399-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,881 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,42835,1732343849399-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,881 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,42835,1732343849399-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,882 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-df2f15951535:42835, period=300000, unit=MILLISECONDS is enabled. 
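The ConnectionUtils lines above show a procedure worker fetching the hbase:meta region location from the connection registry. A client can observe the same location through the public API; a minimal sketch, assuming a reachable cluster configuration (nothing here is taken from the test source).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Equivalent to the "The fetched meta region location is ..." debug line above.
      HRegionLocation meta = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is on " + meta.getServerName());
    }
  }
}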
2024-11-23T06:37:30,882 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,882 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T06:37:30,884 DEBUG [master/df2f15951535:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T06:37:30,886 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.238sec 2024-11-23T06:37:30,886 INFO [master/df2f15951535:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T06:37:30,886 INFO [master/df2f15951535:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T06:37:30,886 INFO [master/df2f15951535:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T06:37:30,886 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T06:37:30,886 INFO [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T06:37:30,886 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,42835,1732343849399-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:37:30,886 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,42835,1732343849399-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T06:37:30,889 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T06:37:30,889 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T06:37:30,889 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,42835,1732343849399-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
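The ChoreService lines above show the master registering its periodic chores (ClusterStatusChore, BalancerChore, CatalogJanitor, and so on) with their periods. A hedged sketch of the same mechanism with a trivial custom chore; the chore name, period, and body are invented for illustration, and ScheduledChore/ChoreService are internal HBase classes, so this is a sketch of the pattern rather than a supported client API.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    // Minimal Stoppable so the chore can be cancelled, as the master does on shutdown.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    // period=60000 ms, matching the ClusterStatusChore entry in the log above.
    ScheduledChore chore = new ScheduledChore("sketch-chore", stopper, 60000) {
      @Override protected void chore() {
        // periodic work would go here
      }
    };
    service.scheduleChore(chore);
    // ... later: stopper.stop("done"); service.shutdown();
  }
}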
2024-11-23T06:37:30,903 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6beed7e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:37:30,903 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request df2f15951535,42835,-1 for getting cluster id 2024-11-23T06:37:30,903 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T06:37:30,904 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f8ad3679-9010-43c7-9f83-f3c1fd1d33c9' 2024-11-23T06:37:30,905 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T06:37:30,905 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f8ad3679-9010-43c7-9f83-f3c1fd1d33c9" 2024-11-23T06:37:30,905 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58f955b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:37:30,905 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [df2f15951535,42835,-1] 2024-11-23T06:37:30,905 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T06:37:30,906 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:30,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T06:37:30,906 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T06:37:30,907 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59612, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T06:37:30,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:37:30,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T06:37:30,908 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61726e31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:37:30,908 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:37:30,909 DEBUG 
[RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,41201,1732343849581, seqNum=-1] 2024-11-23T06:37:30,909 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:37:30,911 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43102, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:37:30,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=df2f15951535,42835,1732343849399 2024-11-23T06:37:30,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:37:30,915 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T06:37:30,915 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-23T06:37:30,915 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-23T06:37:30,915 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T06:37:30,916 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is df2f15951535,42835,1732343849399 2024-11-23T06:37:30,917 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@596ec161 2024-11-23T06:37:30,917 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T06:37:30,919 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59622, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T06:37:30,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42835 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-23T06:37:30,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42835 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
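The two TableDescriptorChecker warnings above are expected in this test: the region max file size (786432 bytes) and memstore flush size (8192 bytes) are set far below production defaults so that flushes, rolls, and splits happen quickly. Whether the test applies them via the cluster Configuration or the table descriptor is not visible in the log; a hedged sketch of the Configuration route, using the exact keys named in the warnings, follows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionSizesSketch {
  // Hedged sketch: values copied from the warnings above, wiring is illustrative.
  public static Configuration smallRegions() {
    Configuration conf = HBaseConfiguration.create();
    // Far below the defaults (10 GB max file size, 128 MB flush size),
    // which is exactly why TableDescriptorChecker warns.
    conf.setLong("hbase.hregion.max.filesize", 786432L);
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
    return conf;
  }
}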
2024-11-23T06:37:30,920 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42835 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:37:30,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42835 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-23T06:37:30,923 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T06:37:30,923 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:30,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42835 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-23T06:37:30,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42835 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T06:37:30,924 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T06:37:30,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43123 is added to blk_1073741835_1011 (size=395) 2024-11-23T06:37:30,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34093 is added to blk_1073741835_1011 (size=395) 2024-11-23T06:37:30,936 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 965493d26e778ae3c8fdf476c013fd20, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca 2024-11-23T06:37:30,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43123 is added to blk_1073741836_1012 (size=78) 2024-11-23T06:37:30,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34093 is added to blk_1073741836_1012 (size=78) 2024-11-23T06:37:30,944 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:37:30,944 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 965493d26e778ae3c8fdf476c013fd20, disabling compactions & flushes 2024-11-23T06:37:30,944 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 2024-11-23T06:37:30,944 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 2024-11-23T06:37:30,944 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. after waiting 0 ms 2024-11-23T06:37:30,944 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 2024-11-23T06:37:30,944 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 2024-11-23T06:37:30,944 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 965493d26e778ae3c8fdf476c013fd20: Waiting for close lock at 1732343850944Disabling compacts and flushes for region at 1732343850944Disabling writes for close at 1732343850944Writing region close event to WAL at 1732343850944Closed at 1732343850944 2024-11-23T06:37:30,946 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T06:37:30,946 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732343850946"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732343850946"}]},"ts":"1732343850946"} 2024-11-23T06:37:30,949 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
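The CreateTableProcedure above (pid=4) is driven by an Admin createTable request; the log shows the resulting descriptor with a single 'info' family and one version. A minimal client-side sketch of issuing that request; the table and family names come from the log, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
      // One 'info' family with a single version, as in the descriptor logged above.
      // createTable is synchronous: it returns once the create procedure finishes
      // and the region has been assigned, as pid=4..6 show above.
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1).build())
          .build());
    }
  }
}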
2024-11-23T06:37:30,950 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T06:37:30,950 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732343850950"}]},"ts":"1732343850950"} 2024-11-23T06:37:30,953 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-23T06:37:30,953 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=965493d26e778ae3c8fdf476c013fd20, ASSIGN}] 2024-11-23T06:37:30,955 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=965493d26e778ae3c8fdf476c013fd20, ASSIGN 2024-11-23T06:37:30,957 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=965493d26e778ae3c8fdf476c013fd20, ASSIGN; state=OFFLINE, location=df2f15951535,41201,1732343849581; forceNewPlan=false, retain=false 2024-11-23T06:37:31,108 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=965493d26e778ae3c8fdf476c013fd20, regionState=OPENING, regionLocation=df2f15951535,41201,1732343849581 2024-11-23T06:37:31,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=965493d26e778ae3c8fdf476c013fd20, ASSIGN because future has completed 2024-11-23T06:37:31,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 965493d26e778ae3c8fdf476c013fd20, server=df2f15951535,41201,1732343849581}] 2024-11-23T06:37:31,280 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 
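Once the OpenRegionProcedure below completes and the region is online, a test like testLogRollOnPipelineRestart writes to the table and forces WAL rolls around pipeline disturbances. A hedged sketch of the client-side calls involved; the row, qualifier, and value are invented, and only the roll request itself mirrors what the test exercises.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LogRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name);
         RegionLocator locator = conn.getRegionLocator(name);
         Admin admin = conn.getAdmin()) {
      // Each write is appended to the hosting region server's WAL before it is acknowledged.
      table.put(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
      // Ask the server hosting the region to roll its WAL, which is what produces the
      // AbstractFSWAL "New WAL" lines seen elsewhere in this log.
      ServerName rs = locator.getRegionLocation(Bytes.toBytes("row1")).getServerName();
      admin.rollWALWriter(rs);
    }
  }
}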
2024-11-23T06:37:31,280 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 965493d26e778ae3c8fdf476c013fd20, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:37:31,280 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,280 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:37:31,281 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,281 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,282 INFO [StoreOpener-965493d26e778ae3c8fdf476c013fd20-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,284 INFO [StoreOpener-965493d26e778ae3c8fdf476c013fd20-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 965493d26e778ae3c8fdf476c013fd20 columnFamilyName info 2024-11-23T06:37:31,284 DEBUG [StoreOpener-965493d26e778ae3c8fdf476c013fd20-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:37:31,284 INFO [StoreOpener-965493d26e778ae3c8fdf476c013fd20-1 {}] regionserver.HStore(327): Store=965493d26e778ae3c8fdf476c013fd20/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:37:31,284 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,285 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,286 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,286 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,286 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,288 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,291 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/965493d26e778ae3c8fdf476c013fd20/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:37:31,291 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 965493d26e778ae3c8fdf476c013fd20; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771839, jitterRate=-0.018556997179985046}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T06:37:31,291 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:31,292 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 965493d26e778ae3c8fdf476c013fd20: Running coprocessor pre-open hook at 1732343851281Writing region info on filesystem at 1732343851281Initializing all the Stores at 1732343851282 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343851282Cleaning up temporary data from old regions at 1732343851286 (+4 ms)Running coprocessor post-open hooks at 1732343851292 (+6 ms)Region opened successfully at 1732343851292 2024-11-23T06:37:31,294 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20., pid=6, masterSystemTime=1732343851270 2024-11-23T06:37:31,296 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 2024-11-23T06:37:31,297 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 2024-11-23T06:37:31,298 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=965493d26e778ae3c8fdf476c013fd20, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,41201,1732343849581 2024-11-23T06:37:31,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 965493d26e778ae3c8fdf476c013fd20, server=df2f15951535,41201,1732343849581 because future has completed 2024-11-23T06:37:31,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:37:31,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:37:31,305 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T06:37:31,305 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 965493d26e778ae3c8fdf476c013fd20, server=df2f15951535,41201,1732343849581 in 187 msec 2024-11-23T06:37:31,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T06:37:31,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=965493d26e778ae3c8fdf476c013fd20, ASSIGN in 352 msec 2024-11-23T06:37:31,309 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T06:37:31,309 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732343851309"}]},"ts":"1732343851309"} 2024-11-23T06:37:31,311 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-23T06:37:31,313 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T06:37:31,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 393 msec 2024-11-23T06:37:32,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:32,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:33,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:33,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:34,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:34,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:35,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:35,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:36,305 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T06:37:36,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:36,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:36,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:36,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:36,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:36,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:36,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:36,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:36,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:36,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:36,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:36,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:37:36,337 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T06:37:36,338 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-23T06:37:37,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:37,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:38,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:38,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:39,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:39,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:40,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:40,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:40,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-23T06:37:40,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-23T06:37:41,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42835 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T06:37:41,016 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-23T06:37:41,017 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-23T06:37:41,021 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-23T06:37:41,021 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 2024-11-23T06:37:41,026 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20., hostname=df2f15951535,41201,1732343849581, seqNum=2] 2024-11-23T06:37:41,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:41,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:37:42,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:42,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:43,029 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 2024-11-23T06:37:43,030 WARN [ResponseProcessor for block BP-598499779-172.17.0.3-1732343847083:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-598499779-172.17.0.3-1732343847083:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:43,031 WARN [DataStreamer for file /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399/df2f15951535%2C42835%2C1732343849399.1732343849726 block BP-598499779-172.17.0.3-1732343847083:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-598499779-172.17.0.3-1732343847083:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43123,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK], DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43123,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK]) is bad. 2024-11-23T06:37:43,031 WARN [ResponseProcessor for block BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:43123,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:43,031 WARN [ResponseProcessor for block BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:43123,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:43,032 WARN [DataStreamer for file /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta block BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK], DatanodeInfoWithStorage[127.0.0.1:43123,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43123,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK]) is bad. 2024-11-23T06:37:43,032 WARN [PacketResponder: BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43123] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:43,032 WARN [DataStreamer for file /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 block BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK], DatanodeInfoWithStorage[127.0.0.1:43123,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43123,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK]) is bad. 2024-11-23T06:37:43,032 WARN [PacketResponder: BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43123] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:43,033 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_357075541_22 at /127.0.0.1:34640 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34640 dst: /127.0.0.1:34093 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:43,033 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-674512945_22 at /127.0.0.1:57822 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57822 dst: /127.0.0.1:43123 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:43,033 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_357075541_22 at /127.0.0.1:57854 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57854 dst: /127.0.0.1:43123 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:43,033 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_357075541_22 at /127.0.0.1:34656 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34656 dst: /127.0.0.1:34093 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:43,033 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-674512945_22 at /127.0.0.1:34620 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34620 dst: /127.0.0.1:34093 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:43,034 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_357075541_22 at /127.0.0.1:57858 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57858 dst: /127.0.0.1:43123 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:43,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@37687418{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:43,082 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e7f214b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:37:43,083 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:37:43,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47946b20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:37:43,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64434c96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,STOPPED} 2024-11-23T06:37:43,085 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:37:43,085 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:37:43,085 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-598499779-172.17.0.3-1732343847083 (Datanode Uuid f576fb4a-11b8-4637-81d6-66ae4c594868) service to localhost/127.0.0.1:38355 2024-11-23T06:37:43,085 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:37:43,086 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data3/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:43,086 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data4/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:43,086 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:37:43,094 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:37:43,097 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:37:43,098 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:37:43,098 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:37:43,098 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:37:43,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2460467f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:37:43,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75f58649{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:37:43,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c5aa216{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/java.io.tmpdir/jetty-localhost-39499-hadoop-hdfs-3_4_1-tests_jar-_-any-1919661689565371002/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:43,194 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@46332285{HTTP/1.1, 
(http/1.1)}{localhost:39499} 2024-11-23T06:37:43,194 INFO [Time-limited test {}] server.Server(415): Started @169191ms 2024-11-23T06:37:43,195 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:37:43,214 WARN [ResponseProcessor for block BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:43,214 WARN [ResponseProcessor for block BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:43,214 WARN [ResponseProcessor for block BP-598499779-172.17.0.3-1732343847083:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-598499779-172.17.0.3-1732343847083:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:43,215 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_357075541_22 at /127.0.0.1:56288 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56288 dst: /127.0.0.1:34093 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] 
at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:43,215 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_357075541_22 at /127.0.0.1:56292 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56292 dst: /127.0.0.1:34093 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:43,215 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-674512945_22 at /127.0.0.1:56286 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56286 dst: /127.0.0.1:34093 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:43,217 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fb42f9a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:43,217 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d74671d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:37:43,217 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:37:43,217 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13646a74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:37:43,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fc8bed8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,STOPPED} 2024-11-23T06:37:43,219 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:37:43,219 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T06:37:43,219 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-598499779-172.17.0.3-1732343847083 (Datanode Uuid 55900776-64c5-4508-aca2-ae63f3cd1351) service to localhost/127.0.0.1:38355 2024-11-23T06:37:43,219 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:37:43,219 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data1/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:43,220 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data2/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:43,220 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:37:43,229 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:37:43,231 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:37:43,232 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:37:43,232 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:37:43,232 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:37:43,233 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75ed142f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:37:43,233 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ede944f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:37:43,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:43,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:43,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7180ac25{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/java.io.tmpdir/jetty-localhost-41437-hadoop-hdfs-3_4_1-tests_jar-_-any-3592525751490945088/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:43,326 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@248d0d6a{HTTP/1.1, (http/1.1)}{localhost:41437} 2024-11-23T06:37:43,326 INFO [Time-limited test {}] server.Server(415): Started @169323ms 2024-11-23T06:37:43,328 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:37:43,725 WARN [Thread-1328 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T06:37:43,727 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x702f8bfb3754f896 with lease ID 0x170f246ce1604e5d: from storage DS-45e7fee3-2bea-490f-b165-1f9937c883f3 node DatanodeRegistration(127.0.0.1:45537, datanodeUuid=f576fb4a-11b8-4637-81d6-66ae4c594868, infoPort=37193, infoSecurePort=0, ipcPort=34503, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:43,727 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x702f8bfb3754f896 with lease ID 0x170f246ce1604e5d: from storage DS-891afff4-5430-468d-b3a3-417ac165acc2 node DatanodeRegistration(127.0.0.1:45537, datanodeUuid=f576fb4a-11b8-4637-81d6-66ae4c594868, infoPort=37193, infoSecurePort=0, ipcPort=34503, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:43,873 WARN [Thread-1348 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:37:43,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x271576778364046 with lease ID 0x170f246ce1604e5e: from storage DS-58ebab4b-6297-4f78-8fa0-772540d0dd04 node DatanodeRegistration(127.0.0.1:42191, datanodeUuid=55900776-64c5-4508-aca2-ae63f3cd1351, infoPort=35485, infoSecurePort=0, ipcPort=43609, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:43,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x271576778364046 with lease ID 0x170f246ce1604e5e: from storage DS-c6a74855-42f0-440b-821d-59c86375446f node DatanodeRegistration(127.0.0.1:42191, datanodeUuid=55900776-64c5-4508-aca2-ae63f3cd1351, infoPort=35485, infoSecurePort=0, ipcPort=43609, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:44,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:44,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:37:44,345 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-23T06:37:44,349 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-23T06:37:44,351 ERROR [FSHLog-0-hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca-prefix:df2f15951535,41201,1732343849581 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:44,351 WARN [FSHLog-0-hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca-prefix:df2f15951535,41201,1732343849581 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:44,352 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C41201%2C1732343849581:(num 1732343850195) roll requested 2024-11-23T06:37:44,352 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41201%2C1732343849581.1732343864352 2024-11-23T06:37:44,358 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 newFile=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 2024-11-23T06:37:44,359 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:44,359 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:44,359 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:44,359 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:44,359 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:44,359 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 2024-11-23T06:37:44,360 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:44,360 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:44,360 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 2024-11-23T06:37:44,360 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37193:37193),(127.0.0.1/127.0.0.1:35485:35485)] 2024-11-23T06:37:44,360 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 is not closed yet, will try archiving it next time 2024-11-23T06:37:44,360 WARN [IPC Server handler 1 on default port 38355 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-23T06:37:44,361 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 after 1ms 2024-11-23T06:37:45,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:45,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:46,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:46,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:46,366 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-23T06:37:46,727 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-23T06:37:47,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:47,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:48,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:48,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:48,362 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 after 4001ms 2024-11-23T06:37:48,374 WARN [ResponseProcessor for block BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:42191,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:48,375 WARN [DataStreamer for file /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 block BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45537,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK], DatanodeInfoWithStorage[127.0.0.1:42191,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42191,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]) is bad. 2024-11-23T06:37:48,375 WARN [PacketResponder: BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42191] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:48,376 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_357075541_22 at /127.0.0.1:40084 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40084 dst: /127.0.0.1:45537 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:48,376 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_357075541_22 at /127.0.0.1:51860 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42191:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51860 dst: /127.0.0.1:42191 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:48,468 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7180ac25{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:48,469 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@248d0d6a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:37:48,469 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:37:48,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ede944f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:37:48,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75ed142f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,STOPPED} 2024-11-23T06:37:48,471 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:37:48,471 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:37:48,471 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:37:48,471 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-598499779-172.17.0.3-1732343847083 (Datanode Uuid 55900776-64c5-4508-aca2-ae63f3cd1351) service to localhost/127.0.0.1:38355 2024-11-23T06:37:48,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data1/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:48,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data2/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:48,472 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:37:48,478 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:37:48,480 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:37:48,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:37:48,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:37:48,482 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:37:48,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19016e01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:37:48,483 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ef50a45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:37:48,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6db938{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/java.io.tmpdir/jetty-localhost-36983-hadoop-hdfs-3_4_1-tests_jar-_-any-10712542362476297683/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:48,578 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cf288ef{HTTP/1.1, (http/1.1)}{localhost:36983} 2024-11-23T06:37:48,578 INFO [Time-limited test {}] server.Server(415): Started @174575ms 2024-11-23T06:37:48,579 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:37:48,596 WARN [ResponseProcessor for block BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:48,597 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_357075541_22 at /127.0.0.1:40108 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40108 dst: /127.0.0.1:45537 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:48,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c5aa216{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:48,601 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@46332285{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:37:48,601 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:37:48,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75f58649{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:37:48,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2460467f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,STOPPED} 2024-11-23T06:37:48,602 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:37:48,602 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:37:48,602 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-598499779-172.17.0.3-1732343847083 (Datanode Uuid f576fb4a-11b8-4637-81d6-66ae4c594868) service to localhost/127.0.0.1:38355 2024-11-23T06:37:48,602 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:37:48,603 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data3/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:48,603 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data4/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:37:48,603 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:37:48,613 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:37:48,616 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:37:48,616 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:37:48,616 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:37:48,616 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:37:48,617 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c39138a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:37:48,617 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f3e5a16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:37:48,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41b9791e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/java.io.tmpdir/jetty-localhost-44441-hadoop-hdfs-3_4_1-tests_jar-_-any-939400073675497687/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:37:48,712 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1240a3cc{HTTP/1.1, 
(http/1.1)}{localhost:44441} 2024-11-23T06:37:48,712 INFO [Time-limited test {}] server.Server(415): Started @174709ms 2024-11-23T06:37:48,713 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:37:49,171 WARN [Thread-1402 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:37:49,173 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x896b74ec4c485a55 with lease ID 0x170f246ce1604e5f: from storage DS-58ebab4b-6297-4f78-8fa0-772540d0dd04 node DatanodeRegistration(127.0.0.1:43509, datanodeUuid=55900776-64c5-4508-aca2-ae63f3cd1351, infoPort=36625, infoSecurePort=0, ipcPort=37015, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:49,174 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x896b74ec4c485a55 with lease ID 0x170f246ce1604e5f: from storage DS-c6a74855-42f0-440b-821d-59c86375446f node DatanodeRegistration(127.0.0.1:43509, datanodeUuid=55900776-64c5-4508-aca2-ae63f3cd1351, infoPort=36625, infoSecurePort=0, ipcPort=37015, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:49,304 WARN [Thread-1422 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:37:49,306 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x593c3e54897b883 with lease ID 0x170f246ce1604e60: from storage DS-45e7fee3-2bea-490f-b165-1f9937c883f3 node DatanodeRegistration(127.0.0.1:39351, datanodeUuid=f576fb4a-11b8-4637-81d6-66ae4c594868, infoPort=34791, infoSecurePort=0, ipcPort=37273, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:37:49,306 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x593c3e54897b883 with lease ID 0x170f246ce1604e60: from storage DS-891afff4-5430-468d-b3a3-417ac165acc2 node DatanodeRegistration(127.0.0.1:39351, datanodeUuid=f576fb4a-11b8-4637-81d6-66ae4c594868, infoPort=34791, infoSecurePort=0, ipcPort=37273, storageInfo=lv=-57;cid=testClusterID;nsid=923481543;c=1732343847083), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T06:37:49,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:49,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:49,731 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-23T06:37:49,737 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-23T06:37:49,740 ERROR [FSHLog-0-hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca-prefix:df2f15951535,41201,1732343849581 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45537,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:49,740 WARN [FSHLog-0-hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca-prefix:df2f15951535,41201,1732343849581 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45537,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:49,740 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C41201%2C1732343849581:(num 1732343864352) roll requested 2024-11-23T06:37:49,740 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41201%2C1732343849581.1732343869740 2024-11-23T06:37:49,749 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 newFile=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343869740 2024-11-23T06:37:49,749 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:49,749 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:49,749 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:49,750 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:49,750 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:49,750 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343869740 2024-11-23T06:37:49,750 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45537,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:49,750 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45537,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:49,750 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 2024-11-23T06:37:49,751 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36625:36625),(127.0.0.1/127.0.0.1:34791:34791)] 2024-11-23T06:37:49,751 WARN [IPC Server handler 0 on default port 38355 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-23T06:37:49,751 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 is not closed yet, will try archiving it next time 2024-11-23T06:37:49,752 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 after 0ms 2024-11-23T06:37:50,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:50,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:51,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:51,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:51,753 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41201%2C1732343849581.1732343871753 2024-11-23T06:37:51,759 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343869740 newFile=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 2024-11-23T06:37:51,760 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:51,760 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:51,760 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:51,760 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:51,760 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:51,760 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343869740 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 2024-11-23T06:37:51,761 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34791:34791),(127.0.0.1/127.0.0.1:36625:36625)] 2024-11-23T06:37:51,762 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 is not closed yet, will try archiving it next time 2024-11-23T06:37:51,762 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343869740 is not closed yet, will try archiving it next time 2024-11-23T06:37:51,762 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 2024-11-23T06:37:51,762 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 2024-11-23T06:37:51,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741838_1019 (size=1264) 2024-11-23T06:37:51,763 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): 
Recovered lease, attempt=0 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195 after 1ms
2024-11-23T06:37:51,763 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195
2024-11-23T06:37:51,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741838_1019 (size=1264)
2024-11-23T06:37:51,767 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 is not closed yet, will try archiving it next time
2024-11-23T06:37:51,774 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732343851292/Put/vlen=218/seqid=0]
2024-11-23T06:37:51,774 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732343861027/Put/vlen=1045/seqid=0]
2024-11-23T06:37:51,774 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343850195
2024-11-23T06:37:51,774 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352
2024-11-23T06:37:51,774 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352
2024-11-23T06:37:51,775 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 after 1ms
2024-11-23T06:37:51,775 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352
2024-11-23T06:37:51,778 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732343864351/Put/vlen=1045/seqid=0]
2024-11-23T06:37:51,778 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732343866369/Put/vlen=1045/seqid=0]
2024-11-23T06:37:51,778 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352
2024-11-23T06:37:51,778 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343869740
2024-11-23T06:37:51,778 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file
hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343869740 2024-11-23T06:37:51,779 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343869740 after 1ms 2024-11-23T06:37:51,779 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343869740 2024-11-23T06:37:51,781 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732343869739/Put/vlen=1045/seqid=0] 2024-11-23T06:37:51,782 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 2024-11-23T06:37:51,782 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 2024-11-23T06:37:51,782 WARN [IPC Server handler 2 on default port 38355 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-23T06:37:51,782 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 after 0ms 2024-11-23T06:37:52,174 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
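Note on the lease-recovery entries above: RecoverLeaseFSUtils asks the NameNode to recover the lease on the old WAL and then polls whether the file has been closed (both calls are visible in the stack traces), logging "Recovered lease, attempt=N" on success and "Failed to recover lease, attempt=N" otherwise; a later retry (attempt=1, roughly four seconds later in this run) succeeds. A minimal client-side sketch of that loop, assuming an already-opened DistributedFileSystem handle; the NameNode address, WAL path, and timeout values are illustrative, not the ones HBase or this test uses:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Repeatedly ask the NameNode to recover the lease on a WAL file and wait
  // until the file is reported closed, or give up after maxWaitMs.
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long maxWaitMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + maxWaitMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the file is closed and the lease released.
      if (dfs.recoverLease(wal)) {
        System.out.println("Recovered lease, attempt=" + attempt + " on " + wal);
        return true;
      }
      // isFileClosed() lets us detect completion between recoverLease() calls.
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      attempt++;
      Thread.sleep(1000L); // illustrative back-off; HBase uses its own pause schedule
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address and WAL path, for illustration only.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
      recoverLease((DistributedFileSystem) fs, new Path("/wals/example.wal"), 60_000L);
    }
  }
}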
2024-11-23T06:37:52,307 WARN [ResponseProcessor for block BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:52,307 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-674512945_22 at /127.0.0.1:40062 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39351:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40062 dst: /127.0.0.1:39351 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:39351 remote=/127.0.0.1:40062]. Total timeout mills is 60000, 59451 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T06:37:52,308 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-674512945_22 at /127.0.0.1:58594 [Receiving block BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43509:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58594 dst: /127.0.0.1:43509 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:37:52,308 WARN [DataStreamer for file /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 block BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39351,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK], DatanodeInfoWithStorage[127.0.0.1:43509,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39351,DS-45e7fee3-2bea-490f-b165-1f9937c883f3,DISK]) is bad. 
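When a pipeline datanode is declared bad, as in the Error Recovery entry above, the HDFS client tries to rebuild the write pipeline; in a two-datanode mini-cluster there may be no replacement node to add. Whether the writer fails or continues on a shorter pipeline is governed by the client's replace-datanode-on-failure settings. A hedged sketch of how a small-cluster test might relax that policy; the key names are from stock HDFS client configuration, and the values shown are illustrative rather than what this test actually sets:

import org.apache.hadoop.conf.Configuration;

public class PipelinePolicySketch {
  // Build a client Configuration that does not insist on replacing a failed
  // datanode, which is the usual relaxation for tiny test clusters.
  public static Configuration relaxedPipelinePolicy() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // NEVER: keep writing on the surviving datanodes instead of demanding a replacement.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    return conf;
  }
}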
2024-11-23T06:37:52,312 WARN [DataStreamer for file /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 block BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:52,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741839_1022 (size=85) 2024-11-23T06:37:52,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741839_1022 (size=85) 2024-11-23T06:37:52,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:52,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:53,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:53,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:53,754 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343864352 after 4003ms 2024-11-23T06:37:54,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:54,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:55,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:55,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:55,783 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 after 4001ms 2024-11-23T06:37:55,783 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 2024-11-23T06:37:55,789 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 2024-11-23T06:37:55,789 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 965493d26e778ae3c8fdf476c013fd20 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-23T06:37:55,790 ERROR [FSHLog-0-hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca-prefix:df2f15951535,41201,1732343849581 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:55,791 WARN [FSHLog-0-hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca-prefix:df2f15951535,41201,1732343849581 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
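Both append failures above carry the same server-side error: the block is already UNDER_RECOVERY (its lease is being recovered), so the old writer can no longer bump its generation stamp, and the NameNode rejects updateBlockForPipeline. Such errors reach the client wrapped in an IPC RemoteException; a small hedged sketch of how calling code can inspect and unwrap them, using the stock Hadoop IPC API (the handling shown is illustrative, not what AbstractFSWAL does):

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public class RemoteExceptionSketch {
  // Unwrap a server-side exception carried inside an IPC RemoteException.
  static void handle(IOException e) {
    if (e instanceof RemoteException) {
      RemoteException re = (RemoteException) e;
      // getClassName() reports the exception class thrown on the NameNode side.
      System.err.println("server-side class: " + re.getClassName());
      IOException unwrapped = re.unwrapRemoteException();
      System.err.println("unwrapped: " + unwrapped);
    } else {
      System.err.println("local IO error: " + e);
    }
  }
}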
2024-11-23T06:37:55,791 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C41201%2C1732343849581:(num 1732343871753) roll requested 2024-11-23T06:37:55,792 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41201%2C1732343849581.1732343875791 2024-11-23T06:37:55,798 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 newFile=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343875791 2024-11-23T06:37:55,798 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:55,798 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:55,798 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:55,799 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:55,799 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:55,799 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343875791 2024-11-23T06:37:55,799 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:55,799 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-598499779-172.17.0.3-1732343847083:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:55,800 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 2024-11-23T06:37:55,800 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 after 0ms 2024-11-23T06:37:55,801 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36625:36625),(127.0.0.1/127.0.0.1:34791:34791)] 2024-11-23T06:37:55,801 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.1732343871753 to hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/oldWALs/df2f15951535%2C41201%2C1732343849581.1732343871753 2024-11-23T06:37:55,815 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/965493d26e778ae3c8fdf476c013fd20/.tmp/info/58c06bd2ec524c97b0649f5e6958e614 is 1080, key is row1002/info:/1732343861027/Put/seqid=0 2024-11-23T06:37:55,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741841_1024 (size=9270) 2024-11-23T06:37:55,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741841_1024 (size=9270) 2024-11-23T06:37:55,821 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/965493d26e778ae3c8fdf476c013fd20/.tmp/info/58c06bd2ec524c97b0649f5e6958e614 2024-11-23T06:37:55,827 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/965493d26e778ae3c8fdf476c013fd20/.tmp/info/58c06bd2ec524c97b0649f5e6958e614 as hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/965493d26e778ae3c8fdf476c013fd20/info/58c06bd2ec524c97b0649f5e6958e614 2024-11-23T06:37:55,832 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/965493d26e778ae3c8fdf476c013fd20/info/58c06bd2ec524c97b0649f5e6958e614, entries=4, sequenceid=8, filesize=9.1 K 2024-11-23T06:37:55,833 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 965493d26e778ae3c8fdf476c013fd20 in 44ms, sequenceid=8, compaction requested=false 2024-11-23T06:37:55,833 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 965493d26e778ae3c8fdf476c013fd20: 2024-11-23T06:37:55,833 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-23T06:37:55,834 ERROR [FSHLog-0-hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca-prefix:df2f15951535,41201,1732343849581.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:37:55,834 WARN [FSHLog-0-hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca-prefix:df2f15951535,41201,1732343849581.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:37:55,834 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C41201%2C1732343849581.meta:.meta(num 1732343850786) roll requested
2024-11-23T06:37:55,834 INFO [regionserver/df2f15951535:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41201%2C1732343849581.meta.1732343875834.meta
2024-11-23T06:37:55,839 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T06:37:55,839 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T06:37:55,839 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T06:37:55,839 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T06:37:55,840 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T06:37:55,840 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.meta.1732343875834.meta
2024-11-23T06:37:55,840 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-23T06:37:55,840 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-23T06:37:55,840 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta 2024-11-23T06:37:55,841 WARN [IPC Server handler 0 on default port 38355 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1015 2024-11-23T06:37:55,841 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta after 1ms 2024-11-23T06:37:55,842 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36625:36625),(127.0.0.1/127.0.0.1:34791:34791)] 2024-11-23T06:37:55,842 DEBUG [regionserver/df2f15951535:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta is not closed yet, will try archiving it next time 2024-11-23T06:37:55,859 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/.tmp/info/d212384671224831ba5000718879c13c is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20./info:regioninfo/1732343851298/Put/seqid=0 2024-11-23T06:37:55,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741843_1027 (size=7125) 2024-11-23T06:37:55,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741843_1027 (size=7125) 2024-11-23T06:37:55,864 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/.tmp/info/d212384671224831ba5000718879c13c 2024-11-23T06:37:55,882 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/.tmp/ns/9349fdfef4324056a32b6e4338d0051f is 43, key is default/ns:d/1732343850873/Put/seqid=0 2024-11-23T06:37:55,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741844_1028 (size=5153) 2024-11-23T06:37:55,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741844_1028 (size=5153) 2024-11-23T06:37:55,887 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/.tmp/ns/9349fdfef4324056a32b6e4338d0051f 2024-11-23T06:37:55,906 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/.tmp/table/1b2397e858de4e9c82587573d332dbe7 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732343851309/Put/seqid=0 2024-11-23T06:37:55,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741845_1029 (size=5438) 2024-11-23T06:37:55,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741845_1029 (size=5438) 2024-11-23T06:37:55,912 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/.tmp/table/1b2397e858de4e9c82587573d332dbe7 2024-11-23T06:37:55,918 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/.tmp/info/d212384671224831ba5000718879c13c as hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/info/d212384671224831ba5000718879c13c 2024-11-23T06:37:55,923 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/info/d212384671224831ba5000718879c13c, entries=10, sequenceid=11, filesize=7.0 K 2024-11-23T06:37:55,924 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/.tmp/ns/9349fdfef4324056a32b6e4338d0051f as hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/ns/9349fdfef4324056a32b6e4338d0051f 2024-11-23T06:37:55,930 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/ns/9349fdfef4324056a32b6e4338d0051f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-23T06:37:55,931 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/.tmp/table/1b2397e858de4e9c82587573d332dbe7 as hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/table/1b2397e858de4e9c82587573d332dbe7 2024-11-23T06:37:55,937 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/table/1b2397e858de4e9c82587573d332dbe7, entries=2, sequenceid=11, filesize=5.3 K 2024-11-23T06:37:55,938 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 105ms, sequenceid=11, compaction requested=false 2024-11-23T06:37:55,938 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-23T06:37:55,944 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T06:37:55,944 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T06:37:55,944 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:37:55,944 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:55,944 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T06:37:55,944 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-23T06:37:55,944 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-23T06:37:55,944 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=23578785, stopped=false
2024-11-23T06:37:55,944 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=df2f15951535,42835,1732343849399
2024-11-23T06:37:56,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-23T06:37:56,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-23T06:37:56,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T06:37:56,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T06:37:56,015 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-23T06:37:56,015 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-23T06:37:56,015 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:37:56,016 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:56,016 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:37:56,016 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:37:56,016 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'df2f15951535,41201,1732343849581' ***** 2024-11-23T06:37:56,016 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T06:37:56,016 INFO [RS:0;df2f15951535:41201 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T06:37:56,017 INFO [RS:0;df2f15951535:41201 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T06:37:56,017 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T06:37:56,017 INFO [RS:0;df2f15951535:41201 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T06:37:56,017 INFO [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(3091): Received CLOSE for 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:56,017 INFO [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(959): stopping server df2f15951535,41201,1732343849581 2024-11-23T06:37:56,017 INFO [RS:0;df2f15951535:41201 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:37:56,017 INFO [RS:0;df2f15951535:41201 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;df2f15951535:41201. 2024-11-23T06:37:56,017 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 965493d26e778ae3c8fdf476c013fd20, disabling compactions & flushes 2024-11-23T06:37:56,017 DEBUG [RS:0;df2f15951535:41201 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:37:56,017 DEBUG [RS:0;df2f15951535:41201 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:56,017 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 2024-11-23T06:37:56,017 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 2024-11-23T06:37:56,017 INFO [RS:0;df2f15951535:41201 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T06:37:56,017 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. after waiting 0 ms 2024-11-23T06:37:56,017 INFO [RS:0;df2f15951535:41201 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T06:37:56,017 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20. 2024-11-23T06:37:56,018 INFO [RS:0;df2f15951535:41201 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T06:37:56,018 INFO [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T06:37:56,018 INFO [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T06:37:56,018 DEBUG [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(1325): Online Regions={965493d26e778ae3c8fdf476c013fd20=TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20., 1588230740=hbase:meta,,1.1588230740} 2024-11-23T06:37:56,018 DEBUG [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 965493d26e778ae3c8fdf476c013fd20 2024-11-23T06:37:56,018 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:37:56,018 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:37:56,018 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:37:56,018 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:37:56,018 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:37:56,023 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/965493d26e778ae3c8fdf476c013fd20/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-23T06:37:56,024 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-23T06:37:56,024 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20.
2024-11-23T06:37:56,024 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 965493d26e778ae3c8fdf476c013fd20: Waiting for close lock at 1732343876017Running coprocessor pre-close hooks at 1732343876017Disabling compacts and flushes for region at 1732343876017Disabling writes for close at 1732343876017Writing region close event to WAL at 1732343876018 (+1 ms)Running coprocessor post-close hooks at 1732343876024 (+6 ms)Closed at 1732343876024
2024-11-23T06:37:56,024 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732343850919.965493d26e778ae3c8fdf476c013fd20.
2024-11-23T06:37:56,025 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-23T06:37:56,025 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-23T06:37:56,025 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343876018Running coprocessor pre-close hooks at 1732343876018Disabling compacts and flushes for region at 1732343876018Disabling writes for close at 1732343876018Writing region close event to WAL at 1732343876020 (+2 ms)Running coprocessor post-close hooks at 1732343876025 (+5 ms)Closed at 1732343876025
2024-11-23T06:37:56,025 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-23T06:37:56,062 INFO [regionserver/df2f15951535:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-23T06:37:56,062 INFO [regionserver/df2f15951535:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-23T06:37:56,062 INFO [regionserver/df2f15951535:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-23T06:37:56,218 INFO [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(976): stopping server df2f15951535,41201,1732343849581; all regions closed.
2024-11-23T06:37:56,220 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:56,220 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:56,220 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:56,221 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:56,221 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:56,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741842_1025 (size=825) 2024-11-23T06:37:56,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741842_1025 (size=825) 2024-11-23T06:37:56,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:56,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:57,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:57,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:58,309 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-23T06:37:58,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:58,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:59,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:59,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:37:59,376 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T06:37:59,843 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta after 4003ms 2024-11-23T06:37:59,844 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/WALs/df2f15951535,41201,1732343849581/df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta to hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/oldWALs/df2f15951535%2C41201%2C1732343849581.meta.1732343850786.meta 2024-11-23T06:37:59,892 DEBUG [RS:0;df2f15951535:41201 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/oldWALs 2024-11-23T06:37:59,892 INFO [RS:0;df2f15951535:41201 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C41201%2C1732343849581.meta:.meta(num 1732343875834) 2024-11-23T06:37:59,893 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:59,893 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:59,894 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:59,894 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:59,894 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:37:59,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741840_1023 (size=1162) 2024-11-23T06:37:59,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741840_1023 (size=1162) 2024-11-23T06:37:59,907 DEBUG [RS:0;df2f15951535:41201 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/oldWALs 2024-11-23T06:37:59,907 INFO [RS:0;df2f15951535:41201 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C41201%2C1732343849581:(num 1732343875791) 2024-11-23T06:37:59,907 DEBUG [RS:0;df2f15951535:41201 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:37:59,907 INFO [RS:0;df2f15951535:41201 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:37:59,907 INFO [RS:0;df2f15951535:41201 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:37:59,907 INFO [RS:0;df2f15951535:41201 {}] hbase.ChoreService(370): Chore service for: regionserver/df2f15951535:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T06:37:59,908 INFO [RS:0;df2f15951535:41201 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:37:59,908 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T06:37:59,908 INFO [RS:0;df2f15951535:41201 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41201 2024-11-23T06:37:59,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/df2f15951535,41201,1732343849581 2024-11-23T06:37:59,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:37:59,946 INFO [RS:0;df2f15951535:41201 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:37:59,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [df2f15951535,41201,1732343849581] 2024-11-23T06:37:59,978 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/df2f15951535,41201,1732343849581 already deleted, retry=false 2024-11-23T06:37:59,978 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; df2f15951535,41201,1732343849581 expired; onlineServers=0 2024-11-23T06:37:59,979 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'df2f15951535,42835,1732343849399' ***** 2024-11-23T06:37:59,979 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T06:37:59,979 INFO [M:0;df2f15951535:42835 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:37:59,979 INFO [M:0;df2f15951535:42835 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:37:59,980 DEBUG [M:0;df2f15951535:42835 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T06:37:59,980 DEBUG [M:0;df2f15951535:42835 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T06:37:59,980 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343849943 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343849943,5,FailOnTimeoutGroup] 2024-11-23T06:37:59,980 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343849943 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343849943,5,FailOnTimeoutGroup] 2024-11-23T06:37:59,980 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-23T06:37:59,981 INFO [M:0;df2f15951535:42835 {}] hbase.ChoreService(370): Chore service for: master/df2f15951535:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T06:37:59,982 INFO [M:0;df2f15951535:42835 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:37:59,982 DEBUG [M:0;df2f15951535:42835 {}] master.HMaster(1795): Stopping service threads 2024-11-23T06:37:59,982 INFO [M:0;df2f15951535:42835 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T06:37:59,982 INFO [M:0;df2f15951535:42835 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:37:59,982 INFO [M:0;df2f15951535:42835 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T06:37:59,983 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T06:37:59,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T06:37:59,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:37:59,993 DEBUG [M:0;df2f15951535:42835 {}] zookeeper.ZKUtil(347): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T06:37:59,993 WARN [M:0;df2f15951535:42835 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T06:37:59,994 INFO [M:0;df2f15951535:42835 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/.lastflushedseqids 2024-11-23T06:37:59,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741846_1030 (size=111) 2024-11-23T06:37:59,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741846_1030 (size=111) 2024-11-23T06:37:59,999 INFO [M:0;df2f15951535:42835 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T06:38:00,000 INFO [M:0;df2f15951535:42835 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T06:38:00,000 DEBUG [M:0;df2f15951535:42835 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:38:00,000 INFO [M:0;df2f15951535:42835 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:38:00,000 DEBUG [M:0;df2f15951535:42835 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:38:00,000 DEBUG [M:0;df2f15951535:42835 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-23T06:38:00,000 DEBUG [M:0;df2f15951535:42835 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:38:00,000 INFO [M:0;df2f15951535:42835 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-23T06:38:00,001 ERROR [FSHLog-0-hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData-prefix:df2f15951535,42835,1732343849399 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:38:00,001 WARN [FSHLog-0-hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData-prefix:df2f15951535,42835,1732343849399 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T06:38:00,001 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog df2f15951535%2C42835%2C1732343849399:(num 1732343849726) roll requested 2024-11-23T06:38:00,002 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C42835%2C1732343849399.1732343880001 2024-11-23T06:38:00,008 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:00,008 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:00,008 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:00,008 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:00,008 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:00,009 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399/df2f15951535%2C42835%2C1732343849399.1732343849726 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399/df2f15951535%2C42835%2C1732343849399.1732343880001 2024-11-23T06:38:00,009 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:38:00,009 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34093,DS-58ebab4b-6297-4f78-8fa0-772540d0dd04,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T06:38:00,009 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399/df2f15951535%2C42835%2C1732343849399.1732343849726 2024-11-23T06:38:00,009 WARN [IPC Server handler 1 on default port 38355 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399/df2f15951535%2C42835%2C1732343849399.1732343849726 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-23T06:38:00,010 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399/df2f15951535%2C42835%2C1732343849399.1732343849726 after 1ms 2024-11-23T06:38:00,012 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36625:36625),(127.0.0.1/127.0.0.1:34791:34791)] 2024-11-23T06:38:00,012 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399/df2f15951535%2C42835%2C1732343849399.1732343849726 is not closed yet, will try archiving it next time 2024-11-23T06:38:00,029 DEBUG [M:0;df2f15951535:42835 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6677e2f0ac7b4ab1b631ac2d63fcafa0 is 82, key is hbase:meta,,1/info:regioninfo/1732343850817/Put/seqid=0 2024-11-23T06:38:00,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741848_1033 (size=5672) 2024-11-23T06:38:00,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741848_1033 (size=5672) 2024-11-23T06:38:00,035 INFO [M:0;df2f15951535:42835 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6677e2f0ac7b4ab1b631ac2d63fcafa0 2024-11-23T06:38:00,054 DEBUG [M:0;df2f15951535:42835 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f0c66f1fbca4e9ba82d9a6e82658172 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732343851314/Put/seqid=0 2024-11-23T06:38:00,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741849_1034 (size=6118) 2024-11-23T06:38:00,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741849_1034 (size=6118) 2024-11-23T06:38:00,059 INFO [M:0;df2f15951535:42835 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f0c66f1fbca4e9ba82d9a6e82658172 2024-11-23T06:38:00,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:38:00,068 INFO [RS:0;df2f15951535:41201 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:38:00,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41201-0x10166695d4e0001, 
quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:38:00,068 INFO [RS:0;df2f15951535:41201 {}] regionserver.HRegionServer(1031): Exiting; stopping=df2f15951535,41201,1732343849581; zookeeper connection closed. 2024-11-23T06:38:00,069 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4fd0979d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4fd0979d 2024-11-23T06:38:00,069 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T06:38:00,077 DEBUG [M:0;df2f15951535:42835 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b66bcec9fcd41e0a28387cef3b82a59 is 69, key is df2f15951535,41201,1732343849581/rs:state/1732343850038/Put/seqid=0 2024-11-23T06:38:00,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741850_1035 (size=5156) 2024-11-23T06:38:00,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741850_1035 (size=5156) 2024-11-23T06:38:00,082 INFO [M:0;df2f15951535:42835 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b66bcec9fcd41e0a28387cef3b82a59 2024-11-23T06:38:00,101 DEBUG [M:0;df2f15951535:42835 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8c700fff1b814ddfb48254bb04b3ff44 is 52, key is load_balancer_on/state:d/1732343850914/Put/seqid=0 2024-11-23T06:38:00,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741851_1036 (size=5056) 2024-11-23T06:38:00,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741851_1036 (size=5056) 2024-11-23T06:38:00,105 INFO [M:0;df2f15951535:42835 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8c700fff1b814ddfb48254bb04b3ff44 2024-11-23T06:38:00,110 DEBUG [M:0;df2f15951535:42835 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6677e2f0ac7b4ab1b631ac2d63fcafa0 as hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6677e2f0ac7b4ab1b631ac2d63fcafa0 2024-11-23T06:38:00,115 INFO [M:0;df2f15951535:42835 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6677e2f0ac7b4ab1b631ac2d63fcafa0, entries=8, sequenceid=56, filesize=5.5 K 2024-11-23T06:38:00,115 DEBUG [M:0;df2f15951535:42835 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f0c66f1fbca4e9ba82d9a6e82658172 as hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5f0c66f1fbca4e9ba82d9a6e82658172 2024-11-23T06:38:00,121 INFO [M:0;df2f15951535:42835 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5f0c66f1fbca4e9ba82d9a6e82658172, entries=6, sequenceid=56, filesize=6.0 K 2024-11-23T06:38:00,122 DEBUG [M:0;df2f15951535:42835 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b66bcec9fcd41e0a28387cef3b82a59 as hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9b66bcec9fcd41e0a28387cef3b82a59 2024-11-23T06:38:00,127 INFO [M:0;df2f15951535:42835 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9b66bcec9fcd41e0a28387cef3b82a59, entries=1, sequenceid=56, filesize=5.0 K 2024-11-23T06:38:00,128 DEBUG [M:0;df2f15951535:42835 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8c700fff1b814ddfb48254bb04b3ff44 as hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8c700fff1b814ddfb48254bb04b3ff44 2024-11-23T06:38:00,132 INFO [M:0;df2f15951535:42835 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8c700fff1b814ddfb48254bb04b3ff44, entries=1, sequenceid=56, filesize=4.9 K 2024-11-23T06:38:00,133 INFO [M:0;df2f15951535:42835 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=56, compaction requested=false 2024-11-23T06:38:00,135 INFO [M:0;df2f15951535:42835 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T06:38:00,135 DEBUG [M:0;df2f15951535:42835 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343880000Disabling compacts and flushes for region at 1732343880000Disabling writes for close at 1732343880000Obtaining lock to block concurrent updates at 1732343880000Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732343880000Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732343880001 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732343880012 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732343880013 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732343880029 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732343880029Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732343880040 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732343880054 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732343880054Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732343880064 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732343880076 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732343880076Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732343880087 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732343880100 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732343880100Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41dba4b6: reopening flushed file at 1732343880109 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6aed281: reopening flushed file at 1732343880115 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33c92562: reopening flushed file at 1732343880121 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bb2b07e: reopening flushed file at 1732343880127 (+6 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=56, compaction requested=false at 1732343880133 (+6 ms)Writing region close event to WAL at 1732343880135 (+2 ms)Closed at 1732343880135 2024-11-23T06:38:00,135 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:00,135 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:00,135 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:00,135 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:00,136 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:00,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39351 is added to blk_1073741847_1031 (size=757) 2024-11-23T06:38:00,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43509 is added to blk_1073741847_1031 (size=757) 2024-11-23T06:38:00,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:00,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:00,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:38:00,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T06:38:00,908 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T06:38:00,908 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-23T06:38:01,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,186 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,186 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,308 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-23T06:38:01,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:01,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:01,689 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T06:38:01,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:01,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:02,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:02,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:03,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:03,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:04,011 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399/df2f15951535%2C42835%2C1732343849399.1732343849726 after 4002ms 2024-11-23T06:38:04,013 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/WALs/df2f15951535,42835,1732343849399/df2f15951535%2C42835%2C1732343849399.1732343849726 to hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/oldWALs/df2f15951535%2C42835%2C1732343849399.1732343849726 2024-11-23T06:38:04,021 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/MasterData/oldWALs/df2f15951535%2C42835%2C1732343849399.1732343849726 to hdfs://localhost:38355/user/jenkins/test-data/6e1950a8-2089-802d-2d57-13ef2f30e8ca/oldWALs/df2f15951535%2C42835%2C1732343849399.1732343849726$masterlocalwal$ 2024-11-23T06:38:04,021 INFO [M:0;df2f15951535:42835 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-23T06:38:04,021 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T06:38:04,021 INFO [M:0;df2f15951535:42835 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42835 2024-11-23T06:38:04,021 INFO [M:0;df2f15951535:42835 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:38:04,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:38:04,184 INFO [M:0;df2f15951535:42835 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:38:04,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42835-0x10166695d4e0000, quorum=127.0.0.1:56651, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:38:04,189 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41b9791e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:38:04,190 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1240a3cc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:38:04,190 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:38:04,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f3e5a16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:38:04,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c39138a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,STOPPED} 2024-11-23T06:38:04,192 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:38:04,192 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-598499779-172.17.0.3-1732343847083 (Datanode Uuid f576fb4a-11b8-4637-81d6-66ae4c594868) service to localhost/127.0.0.1:38355 2024-11-23T06:38:04,192 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:38:04,192 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:38:04,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data3/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:38:04,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data4/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:38:04,193 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:38:04,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6db938{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:38:04,197 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cf288ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:38:04,197 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:38:04,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ef50a45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:38:04,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19016e01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,STOPPED} 2024-11-23T06:38:04,198 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
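Note on the "Thread Interrupted waiting to refresh disk information: sleep interrupted" warnings (here and again in the next datanode's shutdown below): they are the expected result of interrupting the CachingGetSpaceUsed refresh threads while they are blocked in Thread.sleep() during teardown, and "sleep interrupted" is simply the message the JDK typically attaches to the resulting InterruptedException. A minimal plain-Java illustration, with no Hadoop involved:

public class InterruptSleepDemo {
    public static void main(String[] args) throws Exception {
        Thread sleeper = new Thread(() -> {
            try {
                Thread.sleep(60_000); // stand-in for the periodic disk-usage refresh wait
            } catch (InterruptedException e) {
                // On common JDKs the message is "sleep interrupted", matching the log text.
                System.out.println("interrupted: " + e.getMessage());
            }
        });
        sleeper.start();
        Thread.sleep(100);   // let the sleeper block first
        sleeper.interrupt(); // teardown path: interrupt the waiting thread
        sleeper.join();
    }
}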
2024-11-23T06:38:04,198 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:38:04,198 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:38:04,198 WARN [BP-598499779-172.17.0.3-1732343847083 heartbeating to localhost/127.0.0.1:38355 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-598499779-172.17.0.3-1732343847083 (Datanode Uuid 55900776-64c5-4508-aca2-ae63f3cd1351) service to localhost/127.0.0.1:38355 2024-11-23T06:38:04,199 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data1/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:38:04,199 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/cluster_f88cceaa-d932-1bf3-91b4-1eab5250d006/data/data2/current/BP-598499779-172.17.0.3-1732343847083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:38:04,199 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:38:04,204 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49ef22be{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:38:04,205 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b230242{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:38:04,205 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:38:04,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7096145a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:38:04,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25a29a07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir/,STOPPED} 2024-11-23T06:38:04,211 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T06:38:04,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T06:38:04,233 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 157) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:38355 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38355 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38355 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38355 from jenkins.hfs.4 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38355 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38355 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38355 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38355 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 448) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=117 (was 188), ProcessCount=11 (was 11), AvailableMemoryMB=7640 (was 7825) 2024-11-23T06:38:04,240 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=117, ProcessCount=11, AvailableMemoryMB=7640 2024-11-23T06:38:04,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T06:38:04,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.log.dir so I do NOT create it in target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3 2024-11-23T06:38:04,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9e8f80d2-b551-59aa-6f51-5420dd54a6f9/hadoop.tmp.dir so I do NOT create it in target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3 2024-11-23T06:38:04,240 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319, deleteOnExit=true 2024-11-23T06:38:04,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T06:38:04,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/test.cache.data in system properties and HBase conf 2024-11-23T06:38:04,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/hadoop.log.dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T06:38:04,241 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T06:38:04,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/nfs.dump.dir in system properties and HBase conf 2024-11-23T06:38:04,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/java.io.tmpdir in system properties and HBase conf 2024-11-23T06:38:04,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:38:04,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T06:38:04,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T06:38:04,254 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:38:04,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:04,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:04,623 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:38:04,627 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:38:04,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:38:04,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:38:04,628 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:38:04,629 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:38:04,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc3ea71{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:38:04,630 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8d4c846{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:38:04,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@611cc51f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/java.io.tmpdir/jetty-localhost-34325-hadoop-hdfs-3_4_1-tests_jar-_-any-11732554742346793467/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:38:04,721 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@77877788{HTTP/1.1, (http/1.1)}{localhost:34325} 2024-11-23T06:38:04,721 INFO [Time-limited test {}] server.Server(415): Started @190718ms 2024-11-23T06:38:04,732 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:38:04,991 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:38:04,993 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:38:04,994 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:38:04,994 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:38:04,994 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:38:04,994 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74b5ebca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:38:04,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a2ef153{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:38:05,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@278dab99{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/java.io.tmpdir/jetty-localhost-35373-hadoop-hdfs-3_4_1-tests_jar-_-any-16436723766702945578/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:38:05,087 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a6c1f86{HTTP/1.1, (http/1.1)}{localhost:35373} 2024-11-23T06:38:05,087 INFO [Time-limited test {}] server.Server(415): Started @191084ms 2024-11-23T06:38:05,088 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:38:05,113 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:38:05,116 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:38:05,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:38:05,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:38:05,117 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:38:05,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f411ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:38:05,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e2a30ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:38:05,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7009eb0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/java.io.tmpdir/jetty-localhost-33139-hadoop-hdfs-3_4_1-tests_jar-_-any-15610625159128408922/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:38:05,211 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38d3f6f8{HTTP/1.1, (http/1.1)}{localhost:33139} 2024-11-23T06:38:05,211 INFO [Time-limited test {}] server.Server(415): Started @191208ms 2024-11-23T06:38:05,212 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:38:05,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:05,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:06,287 WARN [Thread-1642 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/data/data1/current/BP-792531804-172.17.0.3-1732343884264/current, will proceed with Du for space computation calculation, 2024-11-23T06:38:06,287 WARN [Thread-1643 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/data/data2/current/BP-792531804-172.17.0.3-1732343884264/current, will proceed with Du for space computation calculation, 2024-11-23T06:38:06,308 WARN [Thread-1606 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:38:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8749017ee12c7a7d with lease ID 0x9b60937680cfe95c: Processing first storage report for DS-43e03a14-7211-485b-a384-0278ab450425 from datanode DatanodeRegistration(127.0.0.1:35075, datanodeUuid=31c67cb7-da48-414d-838c-01ef83274af3, infoPort=40761, infoSecurePort=0, ipcPort=45551, storageInfo=lv=-57;cid=testClusterID;nsid=1198300125;c=1732343884264) 2024-11-23T06:38:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8749017ee12c7a7d with lease ID 0x9b60937680cfe95c: from storage DS-43e03a14-7211-485b-a384-0278ab450425 node DatanodeRegistration(127.0.0.1:35075, datanodeUuid=31c67cb7-da48-414d-838c-01ef83274af3, infoPort=40761, infoSecurePort=0, ipcPort=45551, storageInfo=lv=-57;cid=testClusterID;nsid=1198300125;c=1732343884264), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:38:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8749017ee12c7a7d with lease ID 0x9b60937680cfe95c: Processing first storage report for DS-222dc474-626d-4197-aa0d-32066035ba34 from datanode DatanodeRegistration(127.0.0.1:35075, datanodeUuid=31c67cb7-da48-414d-838c-01ef83274af3, infoPort=40761, infoSecurePort=0, ipcPort=45551, storageInfo=lv=-57;cid=testClusterID;nsid=1198300125;c=1732343884264) 2024-11-23T06:38:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8749017ee12c7a7d with lease ID 0x9b60937680cfe95c: from storage DS-222dc474-626d-4197-aa0d-32066035ba34 node DatanodeRegistration(127.0.0.1:35075, datanodeUuid=31c67cb7-da48-414d-838c-01ef83274af3, infoPort=40761, infoSecurePort=0, ipcPort=45551, storageInfo=lv=-57;cid=testClusterID;nsid=1198300125;c=1732343884264), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:38:06,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:06,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:06,430 WARN [Thread-1653 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/data/data3/current/BP-792531804-172.17.0.3-1732343884264/current, will proceed with Du for space computation calculation, 2024-11-23T06:38:06,431 WARN [Thread-1654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/data/data4/current/BP-792531804-172.17.0.3-1732343884264/current, will proceed with Du for space computation calculation, 2024-11-23T06:38:06,450 WARN [Thread-1629 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:38:06,452 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x626740131cb42e72 with lease ID 0x9b60937680cfe95d: Processing first storage report for DS-7bba0840-18ee-4e99-850e-fab1f893f998 from datanode DatanodeRegistration(127.0.0.1:44847, datanodeUuid=a204d104-e9a2-49bb-b0af-9590785b25e1, infoPort=39567, infoSecurePort=0, ipcPort=33759, storageInfo=lv=-57;cid=testClusterID;nsid=1198300125;c=1732343884264) 2024-11-23T06:38:06,452 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x626740131cb42e72 with lease ID 0x9b60937680cfe95d: from storage DS-7bba0840-18ee-4e99-850e-fab1f893f998 node DatanodeRegistration(127.0.0.1:44847, datanodeUuid=a204d104-e9a2-49bb-b0af-9590785b25e1, infoPort=39567, infoSecurePort=0, ipcPort=33759, storageInfo=lv=-57;cid=testClusterID;nsid=1198300125;c=1732343884264), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:38:06,453 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x626740131cb42e72 with lease ID 0x9b60937680cfe95d: Processing first storage report for DS-452211ea-be62-46a6-b8d2-404b207e2387 from datanode DatanodeRegistration(127.0.0.1:44847, datanodeUuid=a204d104-e9a2-49bb-b0af-9590785b25e1, infoPort=39567, infoSecurePort=0, ipcPort=33759, storageInfo=lv=-57;cid=testClusterID;nsid=1198300125;c=1732343884264) 2024-11-23T06:38:06,453 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x626740131cb42e72 with lease ID 0x9b60937680cfe95d: from storage DS-452211ea-be62-46a6-b8d2-404b207e2387 node DatanodeRegistration(127.0.0.1:44847, datanodeUuid=a204d104-e9a2-49bb-b0af-9590785b25e1, infoPort=39567, infoSecurePort=0, ipcPort=33759, storageInfo=lv=-57;cid=testClusterID;nsid=1198300125;c=1732343884264), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:38:06,545 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3 2024-11-23T06:38:06,547 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/zookeeper_0, clientPort=54875, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T06:38:06,548 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54875 2024-11-23T06:38:06,548 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:38:06,549 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:38:06,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:38:06,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:38:06,560 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c with version=8 2024-11-23T06:38:06,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/hbase-staging 2024-11-23T06:38:06,561 INFO [Time-limited test {}] client.ConnectionUtils(128): master/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:38:06,561 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:38:06,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:38:06,562 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:38:06,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:38:06,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:38:06,562 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T06:38:06,562 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:38:06,562 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45461 2024-11-23T06:38:06,564 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45461 connecting to ZooKeeper ensemble=127.0.0.1:54875 2024-11-23T06:38:06,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:454610x0, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:38:06,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45461-0x1016669ee7e0000 connected 2024-11-23T06:38:06,736 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:38:06,739 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:38:06,742 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:38:06,742 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c, hbase.cluster.distributed=false 2024-11-23T06:38:06,746 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:38:06,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45461 2024-11-23T06:38:06,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45461 2024-11-23T06:38:06,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45461 2024-11-23T06:38:06,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45461 2024-11-23T06:38:06,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45461 2024-11-23T06:38:06,767 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:38:06,767 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:38:06,767 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:38:06,768 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:38:06,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:38:06,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:38:06,768 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T06:38:06,768 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:38:06,768 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41363 2024-11-23T06:38:06,770 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41363 connecting to ZooKeeper ensemble=127.0.0.1:54875 2024-11-23T06:38:06,770 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:38:06,771 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:38:06,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413630x0, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:38:06,783 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41363-0x1016669ee7e0001 connected 2024-11-23T06:38:06,783 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:38:06,783 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T06:38:06,784 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T06:38:06,784 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T06:38:06,785 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:38:06,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41363 2024-11-23T06:38:06,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41363 2024-11-23T06:38:06,786 DEBUG [Time-limited 
test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41363 2024-11-23T06:38:06,787 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41363 2024-11-23T06:38:06,787 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41363 2024-11-23T06:38:06,800 DEBUG [M:0;df2f15951535:45461 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;df2f15951535:45461 2024-11-23T06:38:06,800 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/df2f15951535,45461,1732343886561 2024-11-23T06:38:06,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:38:06,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:38:06,810 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/df2f15951535,45461,1732343886561 2024-11-23T06:38:06,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T06:38:06,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:06,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:06,820 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T06:38:06,821 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/df2f15951535,45461,1732343886561 from backup master directory 2024-11-23T06:38:06,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:38:06,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/df2f15951535,45461,1732343886561 2024-11-23T06:38:06,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:38:06,894 WARN [master/df2f15951535:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T06:38:06,894 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=df2f15951535,45461,1732343886561 2024-11-23T06:38:06,905 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/hbase.id] with ID: 5fc064bc-3f4e-4fb1-a885-e7586f386c36 2024-11-23T06:38:06,905 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/.tmp/hbase.id 2024-11-23T06:38:06,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:38:06,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:38:06,914 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/.tmp/hbase.id]:[hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/hbase.id] 2024-11-23T06:38:06,926 INFO [master/df2f15951535:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:38:06,926 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T06:38:06,927 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
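The FSUtils messages above show the cluster ID bootstrap: hbase.id is first written to a temporary location under .tmp and then moved to its final path. Below is a minimal, hypothetical Java sketch of that write-then-rename pattern using only the public Hadoop FileSystem API; the class name, method name and demo paths are illustrative and this is not the HBase implementation.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch: write a small file under <dir>/.tmp and rename it to
// <dir>/<name>, mirroring the hbase.id bootstrap logged above. Not HBase source.
public final class WriteThenRenameSketch {

  static void writeThenRename(FileSystem fs, Path dir, String name, String content)
      throws IOException {
    Path tmp = new Path(new Path(dir, ".tmp"), name);
    Path dst = new Path(dir, name);
    try (FSDataOutputStream out = fs.create(tmp, true)) { // overwrite any stale temp file
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    // On HDFS, rename is a metadata operation, so dst appears complete or not at all.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename " + tmp + " -> " + dst + " failed");
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // The UUID matches the cluster ID printed in the log; the target directory is illustrative.
    writeThenRename(fs, new Path("/tmp/demo"), "hbase.id",
        "5fc064bc-3f4e-4fb1-a885-e7586f386c36");
  }
}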
2024-11-23T06:38:06,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:06,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:06,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:38:06,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:38:06,967 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:38:06,967 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T06:38:06,967 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:38:06,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:38:06,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:38:06,979 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store 2024-11-23T06:38:06,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:38:06,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:38:06,990 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:38:06,990 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:38:06,990 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:38:06,990 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:38:06,990 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:38:06,990 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:38:06,990 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
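The master:store descriptor printed above lists four column families (info, proc, rs, state) together with their versions, block encodings, bloom filters, in-memory flags and block sizes. As a hedged illustration of what those attributes mean, the sketch below rebuilds two of the families with the public TableDescriptorBuilder API; it is not the internal MasterRegion code, and the class name is made up for the example.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch: expresses a subset of the 'master:store' descriptor from
// the log with the public builder API, only to decode the printed attributes.
public final class MasterStoreDescriptorSketch {

  static TableDescriptor build() {
    // 'info' family: VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, IN_MEMORY=true, 8 KB blocks
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    // 'proc' family: VERSIONS=1, default encoding, ROW bloom, 64 KB blocks
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}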
2024-11-23T06:38:06,990 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343886990Disabling compacts and flushes for region at 1732343886990Disabling writes for close at 1732343886990Writing region close event to WAL at 1732343886990Closed at 1732343886990 2024-11-23T06:38:06,991 WARN [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/.initializing 2024-11-23T06:38:06,991 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/WALs/df2f15951535,45461,1732343886561 2024-11-23T06:38:06,995 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C45461%2C1732343886561, suffix=, logDir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/WALs/df2f15951535,45461,1732343886561, archiveDir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/oldWALs, maxLogs=10 2024-11-23T06:38:06,996 INFO [master/df2f15951535:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C45461%2C1732343886561.1732343886996 2024-11-23T06:38:07,001 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/WALs/df2f15951535,45461,1732343886561/df2f15951535%2C45461%2C1732343886561.1732343886996 2024-11-23T06:38:07,002 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39567:39567),(127.0.0.1/127.0.0.1:40761:40761)] 2024-11-23T06:38:07,002 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:38:07,003 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:38:07,003 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,003 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T06:38:07,006 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:38:07,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T06:38:07,008 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:38:07,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,011 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T06:38:07,011 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,012 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:38:07,012 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,013 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T06:38:07,013 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,014 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:38:07,014 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,015 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,016 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,018 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,018 DEBUG [master/df2f15951535:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,019 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T06:38:07,020 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:38:07,023 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:38:07,024 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712096, jitterRate=-0.09452411532402039}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T06:38:07,025 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732343887003Initializing all the Stores at 1732343887004 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343887004Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343887004Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343887005 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343887005Cleaning up temporary data from old regions at 1732343887018 (+13 ms)Region opened successfully at 1732343887025 (+7 ms) 2024-11-23T06:38:07,028 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T06:38:07,034 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@688d8025, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:38:07,036 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T06:38:07,036 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T06:38:07,036 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T06:38:07,036 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T06:38:07,040 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 3 msec 2024-11-23T06:38:07,040 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T06:38:07,040 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T06:38:07,050 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T06:38:07,051 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T06:38:07,062 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T06:38:07,062 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T06:38:07,063 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T06:38:07,072 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T06:38:07,073 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T06:38:07,074 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T06:38:07,083 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T06:38:07,084 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T06:38:07,093 DEBUG 
[master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T06:38:07,096 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T06:38:07,104 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T06:38:07,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:38:07,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:38:07,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:07,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:07,115 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=df2f15951535,45461,1732343886561, sessionid=0x1016669ee7e0000, setting cluster-up flag (Was=false) 2024-11-23T06:38:07,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:07,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:07,167 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T06:38:07,168 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,45461,1732343886561 2024-11-23T06:38:07,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:07,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:07,220 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T06:38:07,221 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,45461,1732343886561 2024-11-23T06:38:07,222 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T06:38:07,223 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T06:38:07,223 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T06:38:07,223 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T06:38:07,223 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: df2f15951535,45461,1732343886561 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T06:38:07,225 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:38:07,225 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:38:07,225 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:38:07,225 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:38:07,225 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/df2f15951535:0, corePoolSize=10, maxPoolSize=10 2024-11-23T06:38:07,225 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,225 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:38:07,225 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/df2f15951535:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T06:38:07,226 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732343917226 2024-11-23T06:38:07,226 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T06:38:07,226 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T06:38:07,226 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T06:38:07,226 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T06:38:07,226 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T06:38:07,226 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T06:38:07,226 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,226 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T06:38:07,227 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T06:38:07,227 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:38:07,227 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T06:38:07,227 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T06:38:07,227 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T06:38:07,227 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T06:38:07,227 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343887227,5,FailOnTimeoutGroup] 2024-11-23T06:38:07,227 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343887227,5,FailOnTimeoutGroup] 2024-11-23T06:38:07,227 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,227 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T06:38:07,227 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,228 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,228 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,228 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T06:38:07,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:38:07,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:38:07,237 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T06:38:07,238 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c 2024-11-23T06:38:07,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:38:07,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:38:07,246 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:38:07,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:38:07,248 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:38:07,248 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,249 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:38:07,249 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:38:07,250 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:38:07,250 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:38:07,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:38:07,252 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:38:07,252 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,252 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:38:07,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:38:07,254 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:38:07,254 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:38:07,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:38:07,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740 2024-11-23T06:38:07,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740 2024-11-23T06:38:07,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:38:07,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:38:07,257 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-23T06:38:07,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:38:07,260 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:38:07,261 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826071, jitterRate=0.05040362477302551}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:38:07,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732343887246Initializing all the Stores at 1732343887247 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343887247Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343887247Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343887247Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343887247Cleaning up temporary data from old regions at 1732343887257 (+10 ms)Region opened successfully at 1732343887261 (+4 ms) 2024-11-23T06:38:07,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:38:07,262 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:38:07,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:38:07,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:38:07,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:38:07,262 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:38:07,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343887262Disabling compacts and flushes for region at 1732343887262Disabling writes for close at 1732343887262Writing region close 
event to WAL at 1732343887262Closed at 1732343887262 2024-11-23T06:38:07,263 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:38:07,263 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T06:38:07,264 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T06:38:07,265 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:38:07,266 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T06:38:07,289 INFO [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(746): ClusterId : 5fc064bc-3f4e-4fb1-a885-e7586f386c36 2024-11-23T06:38:07,289 DEBUG [RS:0;df2f15951535:41363 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T06:38:07,321 DEBUG [RS:0;df2f15951535:41363 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T06:38:07,321 DEBUG [RS:0;df2f15951535:41363 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T06:38:07,332 DEBUG [RS:0;df2f15951535:41363 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T06:38:07,332 DEBUG [RS:0;df2f15951535:41363 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b1a067, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:38:07,347 DEBUG [RS:0;df2f15951535:41363 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;df2f15951535:41363 2024-11-23T06:38:07,347 INFO [RS:0;df2f15951535:41363 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T06:38:07,347 INFO [RS:0;df2f15951535:41363 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T06:38:07,347 DEBUG [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T06:38:07,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:07,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:07,348 INFO [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(2659): reportForDuty to master=df2f15951535,45461,1732343886561 with port=41363, startcode=1732343886767 2024-11-23T06:38:07,348 DEBUG [RS:0;df2f15951535:41363 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T06:38:07,349 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37517, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T06:38:07,350 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45461 {}] master.ServerManager(363): Checking decommissioned status of RegionServer df2f15951535,41363,1732343886767 2024-11-23T06:38:07,350 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45461 {}] master.ServerManager(517): Registering regionserver=df2f15951535,41363,1732343886767 2024-11-23T06:38:07,351 DEBUG [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c 2024-11-23T06:38:07,351 DEBUG [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35207 2024-11-23T06:38:07,351 DEBUG [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T06:38:07,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:38:07,362 DEBUG [RS:0;df2f15951535:41363 {}] zookeeper.ZKUtil(111): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/df2f15951535,41363,1732343886767 2024-11-23T06:38:07,362 WARN [RS:0;df2f15951535:41363 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T06:38:07,362 INFO [RS:0;df2f15951535:41363 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:38:07,362 DEBUG [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767 2024-11-23T06:38:07,362 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [df2f15951535,41363,1732343886767] 2024-11-23T06:38:07,365 INFO [RS:0;df2f15951535:41363 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T06:38:07,367 INFO [RS:0;df2f15951535:41363 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T06:38:07,367 INFO [RS:0;df2f15951535:41363 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T06:38:07,368 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,368 INFO [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T06:38:07,369 INFO [RS:0;df2f15951535:41363 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T06:38:07,369 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,369 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,369 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,369 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,369 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,369 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,369 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:38:07,369 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,369 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,370 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/df2f15951535:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T06:38:07,370 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,370 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,370 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:38:07,370 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:38:07,370 DEBUG [RS:0;df2f15951535:41363 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:38:07,371 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,371 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,371 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,371 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,371 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,371 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41363,1732343886767-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:38:07,388 INFO [RS:0;df2f15951535:41363 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T06:38:07,388 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41363,1732343886767-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,388 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,388 INFO [RS:0;df2f15951535:41363 {}] regionserver.Replication(171): df2f15951535,41363,1732343886767 started 2024-11-23T06:38:07,401 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:38:07,401 INFO [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(1482): Serving as df2f15951535,41363,1732343886767, RpcServer on df2f15951535/172.17.0.3:41363, sessionid=0x1016669ee7e0001 2024-11-23T06:38:07,401 DEBUG [RS:0;df2f15951535:41363 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T06:38:07,401 DEBUG [RS:0;df2f15951535:41363 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager df2f15951535,41363,1732343886767 2024-11-23T06:38:07,401 DEBUG [RS:0;df2f15951535:41363 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,41363,1732343886767' 2024-11-23T06:38:07,401 DEBUG [RS:0;df2f15951535:41363 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T06:38:07,402 DEBUG [RS:0;df2f15951535:41363 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T06:38:07,403 DEBUG [RS:0;df2f15951535:41363 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T06:38:07,403 DEBUG [RS:0;df2f15951535:41363 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T06:38:07,403 DEBUG [RS:0;df2f15951535:41363 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager df2f15951535,41363,1732343886767 2024-11-23T06:38:07,403 DEBUG [RS:0;df2f15951535:41363 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,41363,1732343886767' 2024-11-23T06:38:07,403 DEBUG [RS:0;df2f15951535:41363 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T06:38:07,403 DEBUG [RS:0;df2f15951535:41363 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T06:38:07,404 DEBUG [RS:0;df2f15951535:41363 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T06:38:07,404 INFO [RS:0;df2f15951535:41363 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T06:38:07,404 INFO [RS:0;df2f15951535:41363 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T06:38:07,416 WARN [df2f15951535:45461 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-23T06:38:07,506 INFO [RS:0;df2f15951535:41363 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C41363%2C1732343886767, suffix=, logDir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767, archiveDir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/oldWALs, maxLogs=32 2024-11-23T06:38:07,507 INFO [RS:0;df2f15951535:41363 {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41363%2C1732343886767.1732343887506 2024-11-23T06:38:07,513 INFO [RS:0;df2f15951535:41363 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343887506 2024-11-23T06:38:07,514 DEBUG [RS:0;df2f15951535:41363 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40761:40761),(127.0.0.1/127.0.0.1:39567:39567)] 2024-11-23T06:38:07,666 DEBUG [df2f15951535:45461 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T06:38:07,667 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=df2f15951535,41363,1732343886767 2024-11-23T06:38:07,670 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,41363,1732343886767, state=OPENING 2024-11-23T06:38:07,720 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T06:38:07,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:07,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:07,732 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:38:07,732 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:38:07,732 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:38:07,732 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,41363,1732343886767}] 2024-11-23T06:38:07,885 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T06:38:07,887 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54923, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T06:38:07,890 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T06:38:07,891 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:38:07,893 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C41363%2C1732343886767.meta, suffix=.meta, logDir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767, archiveDir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/oldWALs, maxLogs=32 2024-11-23T06:38:07,893 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41363%2C1732343886767.meta.1732343887893.meta 2024-11-23T06:38:07,900 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.meta.1732343887893.meta 2024-11-23T06:38:07,901 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39567:39567),(127.0.0.1/127.0.0.1:40761:40761)] 2024-11-23T06:38:07,901 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:38:07,902 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T06:38:07,902 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T06:38:07,902 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-23T06:38:07,902 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T06:38:07,902 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:38:07,902 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T06:38:07,902 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T06:38:07,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:38:07,904 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:38:07,904 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,905 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:38:07,905 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:38:07,906 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:38:07,906 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:38:07,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:38:07,907 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:38:07,907 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:38:07,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:38:07,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:38:07,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:07,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-23T06:38:07,908 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:38:07,909 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740 2024-11-23T06:38:07,910 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740 2024-11-23T06:38:07,911 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:38:07,911 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:38:07,912 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T06:38:07,914 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:38:07,915 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824752, jitterRate=0.04872654378414154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:38:07,915 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T06:38:07,915 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732343887902Writing region info on filesystem at 1732343887902Initializing all the Stores at 1732343887903 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343887903Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343887903Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343887903Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343887903Cleaning up temporary data from old regions at 1732343887911 (+8 ms)Running coprocessor post-open hooks at 1732343887915 (+4 ms)Region opened successfully at 1732343887915 2024-11-23T06:38:07,917 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732343887885 2024-11-23T06:38:07,920 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T06:38:07,920 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T06:38:07,921 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,41363,1732343886767 2024-11-23T06:38:07,923 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,41363,1732343886767, state=OPEN 2024-11-23T06:38:07,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:38:07,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:38:07,962 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=df2f15951535,41363,1732343886767 2024-11-23T06:38:07,962 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:38:07,962 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:38:07,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T06:38:07,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,41363,1732343886767 in 230 msec 2024-11-23T06:38:07,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T06:38:07,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 705 msec 2024-11-23T06:38:07,974 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:38:07,974 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T06:38:07,975 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:38:07,976 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,41363,1732343886767, seqNum=-1] 2024-11-23T06:38:07,976 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:38:07,978 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47879, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:38:07,985 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 760 msec 2024-11-23T06:38:07,985 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732343887985, completionTime=-1 2024-11-23T06:38:07,985 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T06:38:07,985 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T06:38:07,987 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T06:38:07,988 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732343947988 2024-11-23T06:38:07,988 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732344007988 2024-11-23T06:38:07,988 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-23T06:38:07,988 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,45461,1732343886561-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,988 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,45461,1732343886561-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,988 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,45461,1732343886561-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,988 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-df2f15951535:45461, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:38:07,988 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,989 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T06:38:07,991 DEBUG [master/df2f15951535:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T06:38:07,992 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.097sec 2024-11-23T06:38:07,993 INFO [master/df2f15951535:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T06:38:07,993 INFO [master/df2f15951535:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T06:38:07,993 INFO [master/df2f15951535:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T06:38:07,993 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T06:38:07,993 INFO [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T06:38:07,993 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,45461,1732343886561-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:38:07,993 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,45461,1732343886561-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T06:38:07,995 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T06:38:07,995 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T06:38:07,995 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,45461,1732343886561-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:38:08,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69c9ac98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:38:08,089 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request df2f15951535,45461,-1 for getting cluster id 2024-11-23T06:38:08,090 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T06:38:08,091 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5fc064bc-3f4e-4fb1-a885-e7586f386c36' 2024-11-23T06:38:08,092 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T06:38:08,092 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5fc064bc-3f4e-4fb1-a885-e7586f386c36" 2024-11-23T06:38:08,092 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fc23ac2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:38:08,092 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [df2f15951535,45461,-1] 2024-11-23T06:38:08,092 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T06:38:08,093 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:38:08,094 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41672, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T06:38:08,095 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f0d70d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:38:08,095 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:38:08,096 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,41363,1732343886767, seqNum=-1] 2024-11-23T06:38:08,096 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:38:08,097 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36060, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:38:08,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=df2f15951535,45461,1732343886561 2024-11-23T06:38:08,099 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:38:08,102 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T06:38:08,102 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T06:38:08,103 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is df2f15951535,45461,1732343886561 2024-11-23T06:38:08,103 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2f022144 2024-11-23T06:38:08,103 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T06:38:08,105 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41678, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T06:38:08,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-23T06:38:08,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-23T06:38:08,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:38:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T06:38:08,108 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T06:38:08,108 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:08,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-23T06:38:08,109 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T06:38:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T06:38:08,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741835_1011 (size=405) 2024-11-23T06:38:08,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741835_1011 (size=405) 2024-11-23T06:38:08,117 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 99b65ce6157483e76ced492b2e1f6fa0, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c 2024-11-23T06:38:08,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741836_1012 (size=88) 2024-11-23T06:38:08,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741836_1012 (size=88) 2024-11-23T06:38:08,124 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:38:08,124 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 99b65ce6157483e76ced492b2e1f6fa0, disabling compactions & flushes 2024-11-23T06:38:08,124 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:08,124 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:08,124 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. after waiting 0 ms 2024-11-23T06:38:08,124 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 
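The create request logged at 06:38:08,105 above carries a table descriptor with a single 'info' family, MAX_FILESIZE of 786432 and MEMSTORE_FLUSHSIZE of 8192, which is exactly why TableDescriptorChecker warns that both values are too small. The following minimal sketch shows how an equivalent request could be issued through the HBase Admin API; the class name, connection setup, and configuration source are illustrative assumptions, not the actual test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes a reachable (mini)cluster configured via the default hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setMaxFileSize(786432)        // small on purpose; triggers the MAX_FILESIZE warning above
          .setMemStoreFlushSize(8192);   // small on purpose; triggers the MEMSTORE_FLUSHSIZE warning
      table.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .build());
      // The master stores this as a CreateTableProcedure (pid=4 in the log) and assigns the region.
      admin.createTable(table.build());
    }
  }
}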
2024-11-23T06:38:08,124 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:08,125 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 99b65ce6157483e76ced492b2e1f6fa0: Waiting for close lock at 1732343888124Disabling compacts and flushes for region at 1732343888124Disabling writes for close at 1732343888124Writing region close event to WAL at 1732343888124Closed at 1732343888124 2024-11-23T06:38:08,126 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T06:38:08,127 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732343888126"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732343888126"}]},"ts":"1732343888126"} 2024-11-23T06:38:08,129 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-23T06:38:08,130 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T06:38:08,130 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732343888130"}]},"ts":"1732343888130"} 2024-11-23T06:38:08,132 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-23T06:38:08,132 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=99b65ce6157483e76ced492b2e1f6fa0, ASSIGN}] 2024-11-23T06:38:08,134 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=99b65ce6157483e76ced492b2e1f6fa0, ASSIGN 2024-11-23T06:38:08,134 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=99b65ce6157483e76ced492b2e1f6fa0, ASSIGN; state=OFFLINE, location=df2f15951535,41363,1732343886767; forceNewPlan=false, retain=false 2024-11-23T06:38:08,286 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=99b65ce6157483e76ced492b2e1f6fa0, regionState=OPENING, regionLocation=df2f15951535,41363,1732343886767 2024-11-23T06:38:08,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=99b65ce6157483e76ced492b2e1f6fa0, ASSIGN because future has completed 2024-11-23T06:38:08,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99b65ce6157483e76ced492b2e1f6fa0, server=df2f15951535,41363,1732343886767}] 2024-11-23T06:38:08,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:08,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:08,452 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 
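The repeated RecoverLeaseFSUtils warnings above all wrap the same root cause: the reflective DistributedFileSystem.isFileClosed call runs after the underlying DFSClient has already been closed, so DFSClient.checkOpen throws "Filesystem closed" and the Close-WAL-Writer worker keeps retrying against WAL files under hdfs://localhost:41685, which appear to belong to an earlier mini-cluster in the same test run. A small, illustrative sketch of that failure mode follows, assuming a throwaway MiniDFSCluster (a Hadoop test-only utility) and a hypothetical file path; it is not part of the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class FilesystemClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      DistributedFileSystem fs = cluster.getFileSystem();
      Path wal = new Path("/wals/example.wal");  // hypothetical path standing in for the WAL above
      fs.create(wal).close();
      fs.close();                                // simulate the DFSClient being shut down first
      try {
        // Same call RecoverLeaseFSUtils invokes reflectively; on a closed client it fails.
        fs.isFileClosed(wal);
      } catch (java.io.IOException e) {
        System.out.println("As in the log: " + e.getMessage());  // "Filesystem closed"
      }
    } finally {
      cluster.shutdown();
    }
  }
}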
2024-11-23T06:38:08,452 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 99b65ce6157483e76ced492b2e1f6fa0, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:38:08,453 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,453 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:38:08,453 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,453 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,454 INFO [StoreOpener-99b65ce6157483e76ced492b2e1f6fa0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,456 INFO [StoreOpener-99b65ce6157483e76ced492b2e1f6fa0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99b65ce6157483e76ced492b2e1f6fa0 columnFamilyName info 2024-11-23T06:38:08,456 DEBUG [StoreOpener-99b65ce6157483e76ced492b2e1f6fa0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:38:08,456 INFO [StoreOpener-99b65ce6157483e76ced492b2e1f6fa0-1 {}] regionserver.HStore(327): Store=99b65ce6157483e76ced492b2e1f6fa0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:38:08,457 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,457 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,458 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,458 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,458 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,460 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,462 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:38:08,462 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 99b65ce6157483e76ced492b2e1f6fa0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795499, jitterRate=0.011529684066772461}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T06:38:08,462 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:08,463 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 99b65ce6157483e76ced492b2e1f6fa0: Running coprocessor pre-open hook at 1732343888453Writing region info on filesystem at 1732343888453Initializing all the Stores at 1732343888454 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343888454Cleaning up temporary data from old regions at 1732343888458 (+4 ms)Running coprocessor post-open hooks at 1732343888462 (+4 ms)Region opened successfully at 1732343888463 (+1 ms) 2024-11-23T06:38:08,464 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0., pid=6, masterSystemTime=1732343888447 2024-11-23T06:38:08,466 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:08,466 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:08,467 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=99b65ce6157483e76ced492b2e1f6fa0, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,41363,1732343886767 2024-11-23T06:38:08,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99b65ce6157483e76ced492b2e1f6fa0, server=df2f15951535,41363,1732343886767 because future has completed 2024-11-23T06:38:08,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T06:38:08,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 99b65ce6157483e76ced492b2e1f6fa0, server=df2f15951535,41363,1732343886767 in 178 msec 2024-11-23T06:38:08,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T06:38:08,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=99b65ce6157483e76ced492b2e1f6fa0, ASSIGN in 340 msec 2024-11-23T06:38:08,476 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T06:38:08,476 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732343888476"}]},"ts":"1732343888476"} 2024-11-23T06:38:08,478 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-23T06:38:08,479 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T06:38:08,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 374 msec 2024-11-23T06:38:09,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:09,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:10,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:10,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:10,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T06:38:10,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T06:38:10,908 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:38:10,908 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T06:38:10,909 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T06:38:10,909 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T06:38:10,910 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T06:38:10,910 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-23T06:38:11,351 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:11,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:12,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:12,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:12,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,904 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:12,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:13,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:13,432 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T06:38:13,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,465 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:38:13,479 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T06:38:13,480 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-23T06:38:14,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:14,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:15,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:15,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:16,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:16,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:17,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:17,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:18,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T06:38:18,126 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-23T06:38:18,126 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-23T06:38:18,130 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T06:38:18,131 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 
2024-11-23T06:38:18,135 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0., hostname=df2f15951535,41363,1732343886767, seqNum=2] 2024-11-23T06:38:18,143 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T06:38:18,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T06:38:18,149 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-23T06:38:18,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T06:38:18,151 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T06:38:18,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T06:38:18,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-23T06:38:18,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 
2024-11-23T06:38:18,315 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 99b65ce6157483e76ced492b2e1f6fa0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-23T06:38:18,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/8ef97a817df5402a918cee25ed52ab0b is 1080, key is row0001/info:/1732343898136/Put/seqid=0 2024-11-23T06:38:18,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741837_1013 (size=6033) 2024-11-23T06:38:18,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741837_1013 (size=6033) 2024-11-23T06:38:18,339 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/8ef97a817df5402a918cee25ed52ab0b 2024-11-23T06:38:18,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/8ef97a817df5402a918cee25ed52ab0b as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/8ef97a817df5402a918cee25ed52ab0b 2024-11-23T06:38:18,352 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/8ef97a817df5402a918cee25ed52ab0b, entries=1, sequenceid=5, filesize=5.9 K 2024-11-23T06:38:18,353 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 99b65ce6157483e76ced492b2e1f6fa0 in 38ms, sequenceid=5, compaction requested=false 2024-11-23T06:38:18,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 99b65ce6157483e76ced492b2e1f6fa0: 2024-11-23T06:38:18,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 
2024-11-23T06:38:18,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-23T06:38:18,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-23T06:38:18,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:18,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:18,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-23T06:38:18,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 205 msec 2024-11-23T06:38:18,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 218 msec 2024-11-23T06:38:19,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:19,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:20,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:20,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:21,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:21,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:22,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:22,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:23,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:23,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:24,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:24,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:25,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:25,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:26,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:26,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:27,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:27,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-23T06:38:28,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-23T06:38:28,178 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-23T06:38:28,186 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-23T06:38:28,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-23T06:38:28,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-23T06:38:28,189 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-23T06:38:28,190 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-23T06:38:28,190 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-23T06:38:28,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-23T06:38:28,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.
2024-11-23T06:38:28,346 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 99b65ce6157483e76ced492b2e1f6fa0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-23T06:38:28,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/ee8b35d86b904ceba4b7f40256a84359 is 1080, key is row0002/info:/1732343908182/Put/seqid=0
2024-11-23T06:38:28,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741838_1014 (size=6033)
2024-11-23T06:38:28,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741838_1014 (size=6033)
2024-11-23T06:38:28,359 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/ee8b35d86b904ceba4b7f40256a84359
2024-11-23T06:38:28,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/ee8b35d86b904ceba4b7f40256a84359 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/ee8b35d86b904ceba4b7f40256a84359
2024-11-23T06:38:28,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:28,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:28,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 after 68113ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T06:38:28,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta after 68101ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T06:38:28,371 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/ee8b35d86b904ceba4b7f40256a84359, entries=1, sequenceid=9, filesize=5.9 K
2024-11-23T06:38:28,372 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 99b65ce6157483e76ced492b2e1f6fa0 in 26ms, sequenceid=9, compaction requested=false
2024-11-23T06:38:28,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 99b65ce6157483e76ced492b2e1f6fa0:
2024-11-23T06:38:28,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.
2024-11-23T06:38:28,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-23T06:38:28,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-23T06:38:28,376 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-23T06:38:28,376 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec
2024-11-23T06:38:28,380 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec
2024-11-23T06:38:29,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-23T06:38:29,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:30,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:30,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:31,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:31,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:32,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:32,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:33,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:33,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:34,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:34,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:35,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:35,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:36,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:36,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:36,545 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T06:38:37,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:37,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:38,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-23T06:38:38,266 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-23T06:38:38,270 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41363%2C1732343886767.1732343918269 2024-11-23T06:38:38,277 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:38,277 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:38,278 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:38,278 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:38,278 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:38,278 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343887506 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343918269 2024-11-23T06:38:38,280 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40761:40761),(127.0.0.1/127.0.0.1:39567:39567)] 2024-11-23T06:38:38,280 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343887506 is not closed yet, will 
try archiving it next time 2024-11-23T06:38:38,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T06:38:38,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741833_1009 (size=5546) 2024-11-23T06:38:38,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741833_1009 (size=5546) 2024-11-23T06:38:38,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T06:38:38,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-23T06:38:38,285 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-23T06:38:38,286 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T06:38:38,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T06:38:38,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:38,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:38,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-23T06:38:38,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:38,440 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 99b65ce6157483e76ced492b2e1f6fa0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-23T06:38:38,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/5a0d1c41995e418a8febb8ce3cb54159 is 1080, key is row0003/info:/1732343918267/Put/seqid=0 2024-11-23T06:38:38,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741840_1016 (size=6033) 2024-11-23T06:38:38,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741840_1016 (size=6033) 2024-11-23T06:38:38,450 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/5a0d1c41995e418a8febb8ce3cb54159 2024-11-23T06:38:38,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/5a0d1c41995e418a8febb8ce3cb54159 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/5a0d1c41995e418a8febb8ce3cb54159 2024-11-23T06:38:38,461 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/5a0d1c41995e418a8febb8ce3cb54159, entries=1, sequenceid=13, filesize=5.9 K 2024-11-23T06:38:38,462 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 99b65ce6157483e76ced492b2e1f6fa0 in 23ms, sequenceid=13, compaction requested=true 2024-11-23T06:38:38,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): 
Flush status journal for 99b65ce6157483e76ced492b2e1f6fa0: 2024-11-23T06:38:38,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:38,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-23T06:38:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-23T06:38:38,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-23T06:38:38,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-23T06:38:38,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec 2024-11-23T06:38:39,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:39,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:40,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:40,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:41,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:41,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:42,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:42,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:43,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:43,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:44,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:44,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:45,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:45,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:46,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:46,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:47,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:47,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:47,999 INFO [master/df2f15951535:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T06:38:47,999 INFO [master/df2f15951535:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-23T06:38:48,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-23T06:38:48,316 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-23T06:38:48,316 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:38:48,318 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:38:48,318 DEBUG [Time-limited test {}] regionserver.HStore(1541): 99b65ce6157483e76ced492b2e1f6fa0/info is initiating minor compaction (all files) 2024-11-23T06:38:48,318 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T06:38:48,318 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
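The SortedCompactionPolicy / ExploringCompactionPolicy records just above capture the selection step: out of 3 eligible store files, the policy settled on a window of 3 files totalling 18099 bytes after checking 1 permutation against the size ratio. As a rough illustration only (a simplified stand-in, not the HBase implementation; the class, method, and parameters below are invented), here is a minimal sketch of picking a ratio-valid window over store-file sizes:

```java
/**
 * Illustrative only: a simplified, hypothetical sketch of ratio-based
 * compaction selection over store-file sizes, loosely modeled on the
 * behaviour described by the ExploringCompactionPolicy log lines above.
 * It is NOT the HBase implementation.
 */
public class RatioSelectionSketch {

    /**
     * Return the start/end (inclusive) indices of the best window of files to
     * compact, or null if no window of at least minFiles passes the ratio check.
     * A file "fits" a window if its size is <= ratio * (sum of the other files
     * in the window); among valid windows, prefer more files, then smaller total size.
     */
    static int[] selectWindow(long[] sizes, int minFiles, int maxFiles, double ratio) {
        int[] best = null;
        long bestTotal = Long.MAX_VALUE;
        int bestCount = 0;
        for (int start = 0; start < sizes.length; start++) {
            for (int end = start + minFiles - 1;
                 end < sizes.length && end - start + 1 <= maxFiles; end++) {
                long total = 0;
                for (int i = start; i <= end; i++) {
                    total += sizes[i];
                }
                boolean valid = true;
                for (int i = start; i <= end; i++) {
                    // no single file may dwarf the rest of the window
                    if (sizes[i] > ratio * (total - sizes[i])) {
                        valid = false;
                        break;
                    }
                }
                int count = end - start + 1;
                if (valid && (count > bestCount || (count == bestCount && total < bestTotal))) {
                    best = new int[] { start, end };
                    bestCount = count;
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Three ~6 KB store files, as in this run: all three end up selected.
        long[] sizes = { 6033, 6033, 6033 };
        int[] window = selectWindow(sizes, 2, 10, 1.2);
        System.out.println(window == null
            ? "no compaction"
            : "compact files " + window[0] + ".." + window[1]);
    }
}
```

With three files of 6033 bytes each (3 x 6033 = 18099, matching the log), every file passes the ratio check against the other two, so the whole set is selected and the compaction that starts below rewrites all three into a single file.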
2024-11-23T06:38:48,318 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 99b65ce6157483e76ced492b2e1f6fa0/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:48,318 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/8ef97a817df5402a918cee25ed52ab0b, hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/ee8b35d86b904ceba4b7f40256a84359, hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/5a0d1c41995e418a8febb8ce3cb54159] into tmpdir=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp, totalSize=17.7 K 2024-11-23T06:38:48,319 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 8ef97a817df5402a918cee25ed52ab0b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732343898136 2024-11-23T06:38:48,319 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ee8b35d86b904ceba4b7f40256a84359, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732343908182 2024-11-23T06:38:48,320 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5a0d1c41995e418a8febb8ce3cb54159, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732343918267 2024-11-23T06:38:48,333 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 99b65ce6157483e76ced492b2e1f6fa0#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:38:48,333 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/45656b15795945c1948891d9175bcb8a is 1080, key is row0001/info:/1732343898136/Put/seqid=0 2024-11-23T06:38:48,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741841_1017 (size=8296) 2024-11-23T06:38:48,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741841_1017 (size=8296) 2024-11-23T06:38:48,345 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/45656b15795945c1948891d9175bcb8a as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/45656b15795945c1948891d9175bcb8a 2024-11-23T06:38:48,352 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 99b65ce6157483e76ced492b2e1f6fa0/info of 99b65ce6157483e76ced492b2e1f6fa0 into 45656b15795945c1948891d9175bcb8a(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T06:38:48,352 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 99b65ce6157483e76ced492b2e1f6fa0: 2024-11-23T06:38:48,355 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41363%2C1732343886767.1732343928354 2024-11-23T06:38:48,362 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:48,363 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:48,363 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:48,363 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:48,363 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:48,363 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343918269 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343928354 2024-11-23T06:38:48,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741839_1015 (size=2520) 2024-11-23T06:38:48,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741839_1015 (size=2520) 2024-11-23T06:38:48,371 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40761:40761),(127.0.0.1/127.0.0.1:39567:39567)] 2024-11-23T06:38:48,371 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343887506 to hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/oldWALs/df2f15951535%2C41363%2C1732343886767.1732343887506 2024-11-23T06:38:48,372 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T06:38:48,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T06:38:48,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-23T06:38:48,375 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-23T06:38:48,376 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T06:38:48,376 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T06:38:48,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:48,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:48,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-23T06:38:48,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 
2024-11-23T06:38:48,529 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 99b65ce6157483e76ced492b2e1f6fa0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-23T06:38:48,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/cee83cd70b5d45419b8733496d904263 is 1080, key is row0000/info:/1732343928353/Put/seqid=0 2024-11-23T06:38:48,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741843_1019 (size=6033) 2024-11-23T06:38:48,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741843_1019 (size=6033) 2024-11-23T06:38:48,602 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/cee83cd70b5d45419b8733496d904263 2024-11-23T06:38:48,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/cee83cd70b5d45419b8733496d904263 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/cee83cd70b5d45419b8733496d904263 2024-11-23T06:38:48,619 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/cee83cd70b5d45419b8733496d904263, entries=1, sequenceid=18, filesize=5.9 K 2024-11-23T06:38:48,619 INFO [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 99b65ce6157483e76ced492b2e1f6fa0 in 90ms, sequenceid=18, compaction requested=false 2024-11-23T06:38:48,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 99b65ce6157483e76ced492b2e1f6fa0: 2024-11-23T06:38:48,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 
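The Close-WAL-Writer-0 warnings that recur through this stretch (and resume just below) all follow one pattern: RecoverLeaseFSUtils probes isFileClosed through reflection, the call lands on a DFSClient whose filesystem has already been closed, the resulting IOException comes back wrapped in an InvocationTargetException, and the utility logs the failure and retries about once per second. As a rough illustration only (the stub class and names below are invented, not HBase or HDFS API), a minimal sketch of that probe-and-retry loop:

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

/**
 * Illustrative only: a minimal sketch of the "reflective probe + retry" pattern
 * behind the repeated Close-WAL-Writer-0 warnings in this log. The stub class
 * below is made up; it merely stands in for a client whose isFileClosed check
 * keeps failing because the underlying filesystem is already closed.
 */
public class LeaseProbeRetrySketch {

    /** Stand-in for a filesystem client that has already been shut down. */
    static class ClosedClientStub {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    /** Probe isFileClosed reflectively, retrying roughly once per second, as the log shows. */
    static boolean probeUntilClosed(Object client, String path, int maxAttempts)
            throws InterruptedException {
        try {
            Method isFileClosed = client.getClass().getMethod("isFileClosed", String.class);
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    // A successful call returning true would end the wait here.
                    if ((Boolean) isFileClosed.invoke(client, path)) {
                        return true;
                    }
                } catch (IllegalAccessException | InvocationTargetException e) {
                    // The reflective call wraps the real failure (here "Filesystem closed"),
                    // which is why the log shows InvocationTargetException with a "Caused by".
                    System.err.println("Failed invocation for " + path + ": " + e.getCause());
                }
                Thread.sleep(1000L); // matches the ~1s spacing of the WARN lines
            }
        } catch (NoSuchMethodException e) {
            System.err.println("isFileClosed not available: " + e);
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        probeUntilClosed(new ClosedClientStub(), "hdfs://localhost:41685/example/wal-under-recovery", 3);
    }
}
```

Because the wrapped cause never changes while the client stays closed, every retry produces an identical WARN block, which is why the same trace repeats below with only the timestamp advancing.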
2024-11-23T06:38:48,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-23T06:38:48,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-23T06:38:48,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-23T06:38:48,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 245 msec 2024-11-23T06:38:48,627 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 253 msec 2024-11-23T06:38:49,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:49,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:50,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:50,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:51,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:51,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:52,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:52,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:53,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:53,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:53,453 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 99b65ce6157483e76ced492b2e1f6fa0, had cached 0 bytes from a total of 14329 2024-11-23T06:38:54,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:54,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:55,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:55,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:56,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:56,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:57,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:57,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:58,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45461 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-23T06:38:58,386 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-23T06:38:58,390 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41363%2C1732343886767.1732343938390 2024-11-23T06:38:58,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:58,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:38:58,400 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,400 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,400 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,400 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,400 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,401 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343928354 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343938390 2024-11-23T06:38:58,402 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39567:39567),(127.0.0.1/127.0.0.1:40761:40761)] 2024-11-23T06:38:58,402 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343928354 is not closed yet, will try archiving it next time 2024-11-23T06:38:58,402 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/WALs/df2f15951535,41363,1732343886767/df2f15951535%2C41363%2C1732343886767.1732343918269 to hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/oldWALs/df2f15951535%2C41363%2C1732343886767.1732343918269 2024-11-23T06:38:58,402 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T06:38:58,402 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
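The repeated "Failed invocation ... Filesystem closed" warnings above come from the WAL close path polling DistributedFileSystem#isFileClosed through reflection roughly once per second; because the test's DFSClient has already been shut down, every probe throws an InvocationTargetException wrapping "Filesystem closed". Below is a minimal sketch of that polling pattern, assuming an HDFS FileSystem handle and a WAL path; it is illustrative only, not RecoverLeaseFSUtils' actual code.

import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative polling loop: ask the NameNode whether the old WAL file is closed,
// retrying about once per second, as the warning timestamps above suggest.
// isFileClosed is resolved reflectively so the code also compiles against FileSystem
// implementations that do not expose it; when the DFSClient is already closed, each
// invoke() fails with an InvocationTargetException caused by "Filesystem closed".
final class IsFileClosedPoller {
  static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs) throws InterruptedException {
    Method isFileClosed;
    try {
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS client; caller would fall back to plain lease-recovery retries
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true; // the NameNode reports the file closed; lease recovery is done
        }
      } catch (Exception e) {
        // swallow and retry; this corresponds to the "Failed invocation for hdfs://..." warnings above
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}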
2024-11-23T06:38:58,403 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:38:58,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:38:58,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:38:58,403 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
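The call stack above shows the shutdown entry point: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection ("Stopping rpc client") and then stops the cluster. A hedged skeleton of such a tearDown is sketched below; only tearDown() and shutdownMiniCluster() come from the stack trace, the field name and class body are assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

// Hypothetical skeleton matching the teardown path in the call stack above.
public abstract class AbstractTestLogRollingSketch {
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the shared cluster connection, stops master and region servers,
    // then shuts down the mini DFS and ZooKeeper quorum.
    TEST_UTIL.shutdownMiniCluster();
  }
}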
2024-11-23T06:38:58,403 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T06:38:58,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741842_1018 (size=2026) 2024-11-23T06:38:58,403 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=942283080, stopped=false 2024-11-23T06:38:58,403 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=df2f15951535,45461,1732343886561 2024-11-23T06:38:58,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741842_1018 (size=2026) 2024-11-23T06:38:58,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:38:58,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:38:58,444 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:38:58,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:58,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:58,444 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
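The ZooKeeper events above carry the cluster-wide shutdown signal: the master deletes the /hbase/running znode, and every process watching that path receives a NodeDeleted event and begins stopping. A small stand-alone sketch of that watch-and-react pattern using the plain ZooKeeper client API follows; RunningZNodeWatcher is a hypothetical class, not HBase's ZKWatcher.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative watcher: react to deletion of /hbase/running, re-arming the watch each time.
final class RunningZNodeWatcher implements Watcher {
  private final ZooKeeper zk;

  RunningZNodeWatcher(ZooKeeper zk) { this.zk = zk; }

  void armWatch() throws Exception {
    // exists() registers the watch even if the node is present; the watch also fires on deletion.
    zk.exists("/hbase/running", this);
  }

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Event.EventType.NodeDeleted
        && "/hbase/running".equals(event.getPath())) {
      // Cluster shutdown requested: stop accepting work and begin closing regions.
      System.out.println("Cluster shutdown requested, stopping server");
    }
    try {
      armWatch(); // watches are one-shot, so re-register after every notification
    } catch (Exception ignored) {
    }
  }
}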
2024-11-23T06:38:58,444 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:38:58,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:38:58,445 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'df2f15951535,41363,1732343886767' ***** 2024-11-23T06:38:58,445 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:38:58,445 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T06:38:58,446 INFO [RS:0;df2f15951535:41363 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T06:38:58,446 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:38:58,446 INFO [RS:0;df2f15951535:41363 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T06:38:58,446 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T06:38:58,446 INFO [RS:0;df2f15951535:41363 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T06:38:58,446 INFO [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(3091): Received CLOSE for 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:58,447 INFO [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(959): stopping server df2f15951535,41363,1732343886767 2024-11-23T06:38:58,447 INFO [RS:0;df2f15951535:41363 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:38:58,447 INFO [RS:0;df2f15951535:41363 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;df2f15951535:41363. 2024-11-23T06:38:58,447 DEBUG [RS:0;df2f15951535:41363 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:38:58,447 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 99b65ce6157483e76ced492b2e1f6fa0, disabling compactions & flushes 2024-11-23T06:38:58,447 DEBUG [RS:0;df2f15951535:41363 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:38:58,447 INFO 
[RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:58,447 INFO [RS:0;df2f15951535:41363 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T06:38:58,447 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:58,447 INFO [RS:0;df2f15951535:41363 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T06:38:58,448 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. after waiting 0 ms 2024-11-23T06:38:58,448 INFO [RS:0;df2f15951535:41363 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T06:38:58,448 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:58,448 INFO [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T06:38:58,448 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 99b65ce6157483e76ced492b2e1f6fa0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-23T06:38:58,448 INFO [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T06:38:58,448 DEBUG [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 99b65ce6157483e76ced492b2e1f6fa0=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.} 2024-11-23T06:38:58,449 DEBUG [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 99b65ce6157483e76ced492b2e1f6fa0 2024-11-23T06:38:58,448 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:38:58,449 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:38:58,449 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:38:58,449 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:38:58,449 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:38:58,449 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-23T06:38:58,460 DEBUG 
[RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/9524753475d248958e10f246ae0fd521 is 1080, key is row0001/info:/1732343938388/Put/seqid=0 2024-11-23T06:38:58,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741845_1021 (size=6033) 2024-11-23T06:38:58,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741845_1021 (size=6033) 2024-11-23T06:38:58,465 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/9524753475d248958e10f246ae0fd521 2024-11-23T06:38:58,469 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/.tmp/info/d79411ff0df8487eb44638d5f96cb4b2 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0./info:regioninfo/1732343888467/Put/seqid=0 2024-11-23T06:38:58,472 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/.tmp/info/9524753475d248958e10f246ae0fd521 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/9524753475d248958e10f246ae0fd521 2024-11-23T06:38:58,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741846_1022 (size=7308) 2024-11-23T06:38:58,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741846_1022 (size=7308) 2024-11-23T06:38:58,475 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/.tmp/info/d79411ff0df8487eb44638d5f96cb4b2 2024-11-23T06:38:58,479 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/9524753475d248958e10f246ae0fd521, entries=1, sequenceid=22, filesize=5.9 K 2024-11-23T06:38:58,480 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 99b65ce6157483e76ced492b2e1f6fa0 in 32ms, sequenceid=22, compaction requested=true 2024-11-23T06:38:58,480 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/8ef97a817df5402a918cee25ed52ab0b, hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/ee8b35d86b904ceba4b7f40256a84359, hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/5a0d1c41995e418a8febb8ce3cb54159] to archive 2024-11-23T06:38:58,481 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T06:38:58,483 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/8ef97a817df5402a918cee25ed52ab0b to hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/8ef97a817df5402a918cee25ed52ab0b 2024-11-23T06:38:58,484 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/ee8b35d86b904ceba4b7f40256a84359 to hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/ee8b35d86b904ceba4b7f40256a84359 2024-11-23T06:38:58,485 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/5a0d1c41995e418a8febb8ce3cb54159 to hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/info/5a0d1c41995e418a8febb8ce3cb54159 2024-11-23T06:38:58,485 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=df2f15951535:45461 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-23T06:38:58,485 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [8ef97a817df5402a918cee25ed52ab0b=6033, ee8b35d86b904ceba4b7f40256a84359=6033, 5a0d1c41995e418a8febb8ce3cb54159=6033] 2024-11-23T06:38:58,489 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/99b65ce6157483e76ced492b2e1f6fa0/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-23T06:38:58,490 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 2024-11-23T06:38:58,490 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 99b65ce6157483e76ced492b2e1f6fa0: Waiting for close lock at 1732343938447Running coprocessor pre-close hooks at 1732343938447Disabling compacts and flushes for region at 1732343938447Disabling writes for close at 1732343938448 (+1 ms)Obtaining lock to block concurrent updates at 1732343938448Preparing flush snapshotting stores in 99b65ce6157483e76ced492b2e1f6fa0 at 1732343938448Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732343938449 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. at 1732343938457 (+8 ms)Flushing 99b65ce6157483e76ced492b2e1f6fa0/info: creating writer at 1732343938457Flushing 99b65ce6157483e76ced492b2e1f6fa0/info: appending metadata at 1732343938459 (+2 ms)Flushing 99b65ce6157483e76ced492b2e1f6fa0/info: closing flushed file at 1732343938459Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7352433d: reopening flushed file at 1732343938471 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 99b65ce6157483e76ced492b2e1f6fa0 in 32ms, sequenceid=22, compaction requested=true at 1732343938480 (+9 ms)Writing region close event to WAL at 1732343938486 (+6 ms)Running coprocessor post-close hooks at 1732343938489 (+3 ms)Closed at 1732343938490 (+1 ms) 2024-11-23T06:38:58,490 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732343888105.99b65ce6157483e76ced492b2e1f6fa0. 
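The HFileArchiver lines above move compacted store files from the region's data directory to the mirrored path under archive/ instead of deleting them; the StoppedRpcClientException only means the file-archival quota report to the master could not be sent because the RPC client was already stopped during shutdown, and it is retried later. A rough, simplified stand-in for the move-to-archive step is sketched below, assuming the archive path mirrors the layout under the cluster root directory; it is not HFileArchiver's implementation.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Simplified archival step: move an immutable, already-compacted HFile from
// .../data/<ns>/<table>/<region>/<family>/ to the same relative location under .../archive/.
// Assumes storeFile lives under rootDir, as in the paths logged above.
final class StoreFileArchiverSketch {
  static void archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    String root = rootDir.toString();
    String relative = storeFile.toString().substring(root.length() + 1); // e.g. data/default/<table>/...
    Path archived = new Path(rootDir, "archive/" + relative);
    fs.mkdirs(archived.getParent());          // create .../archive/data/... parent directories
    if (!fs.rename(storeFile, archived)) {    // rename (move) rather than copy; HFiles are immutable
      throw new IOException("Failed to archive " + storeFile + " to " + archived);
    }
  }
}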
2024-11-23T06:38:58,496 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/.tmp/ns/2f3290645c804bf78f3f5c96b80342f3 is 43, key is default/ns:d/1732343887978/Put/seqid=0 2024-11-23T06:38:58,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741847_1023 (size=5153) 2024-11-23T06:38:58,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741847_1023 (size=5153) 2024-11-23T06:38:58,501 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/.tmp/ns/2f3290645c804bf78f3f5c96b80342f3 2024-11-23T06:38:58,519 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/.tmp/table/2d7fbb124f15460a9bce6d6f019e2782 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732343888476/Put/seqid=0 2024-11-23T06:38:58,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741848_1024 (size=5508) 2024-11-23T06:38:58,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741848_1024 (size=5508) 2024-11-23T06:38:58,524 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/.tmp/table/2d7fbb124f15460a9bce6d6f019e2782 2024-11-23T06:38:58,529 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/.tmp/info/d79411ff0df8487eb44638d5f96cb4b2 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/info/d79411ff0df8487eb44638d5f96cb4b2 2024-11-23T06:38:58,535 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/info/d79411ff0df8487eb44638d5f96cb4b2, entries=10, sequenceid=11, filesize=7.1 K 2024-11-23T06:38:58,535 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/.tmp/ns/2f3290645c804bf78f3f5c96b80342f3 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/ns/2f3290645c804bf78f3f5c96b80342f3 2024-11-23T06:38:58,541 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/ns/2f3290645c804bf78f3f5c96b80342f3, entries=2, sequenceid=11, filesize=5.0 K 2024-11-23T06:38:58,541 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/.tmp/table/2d7fbb124f15460a9bce6d6f019e2782 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/table/2d7fbb124f15460a9bce6d6f019e2782 2024-11-23T06:38:58,547 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/table/2d7fbb124f15460a9bce6d6f019e2782, entries=2, sequenceid=11, filesize=5.4 K 2024-11-23T06:38:58,548 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false 2024-11-23T06:38:58,551 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-23T06:38:58,552 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:38:58,552 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:38:58,552 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343938448Running coprocessor pre-close hooks at 1732343938448Disabling compacts and flushes for region at 1732343938448Disabling writes for close at 1732343938449 (+1 ms)Obtaining lock to block concurrent updates at 1732343938449Preparing flush snapshotting stores in 1588230740 at 1732343938449Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732343938450 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732343938451 (+1 ms)Flushing 1588230740/info: creating writer at 1732343938452 (+1 ms)Flushing 1588230740/info: appending metadata at 1732343938469 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732343938469Flushing 1588230740/ns: creating writer at 1732343938481 (+12 ms)Flushing 1588230740/ns: appending metadata at 1732343938496 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732343938496Flushing 1588230740/table: creating writer at 1732343938506 (+10 ms)Flushing 1588230740/table: appending metadata at 1732343938518 (+12 ms)Flushing 1588230740/table: closing flushed file at 1732343938518Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c4afdb8: reopening flushed file at 1732343938529 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5aadcbdd: reopening flushed file at 1732343938535 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fb66e61: reopening flushed file at 1732343938541 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false at 1732343938548 (+7 ms)Writing region close event to WAL at 1732343938548Running coprocessor post-close hooks at 1732343938552 (+4 ms)Closed at 1732343938552 2024-11-23T06:38:58,552 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T06:38:58,649 INFO [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(976): stopping server df2f15951535,41363,1732343886767; all regions closed. 2024-11-23T06:38:58,649 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,649 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,649 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,650 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,650 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741834_1010 (size=3306) 2024-11-23T06:38:58,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741834_1010 (size=3306) 2024-11-23T06:38:58,654 DEBUG [RS:0;df2f15951535:41363 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/oldWALs 2024-11-23T06:38:58,654 INFO [RS:0;df2f15951535:41363 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C41363%2C1732343886767.meta:.meta(num 1732343887893) 2024-11-23T06:38:58,655 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,655 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,655 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,655 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,655 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741844_1020 (size=1252) 2024-11-23T06:38:58,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741844_1020 (size=1252) 2024-11-23T06:38:58,663 DEBUG [RS:0;df2f15951535:41363 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/oldWALs 2024-11-23T06:38:58,663 INFO [RS:0;df2f15951535:41363 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C41363%2C1732343886767:(num 1732343938390) 2024-11-23T06:38:58,663 DEBUG [RS:0;df2f15951535:41363 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:38:58,663 INFO [RS:0;df2f15951535:41363 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:38:58,663 INFO [RS:0;df2f15951535:41363 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:38:58,663 INFO [RS:0;df2f15951535:41363 {}] hbase.ChoreService(370): Chore service for: regionserver/df2f15951535:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T06:38:58,663 INFO [RS:0;df2f15951535:41363 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:38:58,663 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T06:38:58,664 INFO [RS:0;df2f15951535:41363 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41363 2024-11-23T06:38:58,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/df2f15951535,41363,1732343886767 2024-11-23T06:38:58,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:38:58,675 INFO [RS:0;df2f15951535:41363 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:38:58,689 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [df2f15951535,41363,1732343886767] 2024-11-23T06:38:58,700 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/df2f15951535,41363,1732343886767 already deleted, retry=false 2024-11-23T06:38:58,700 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; df2f15951535,41363,1732343886767 expired; onlineServers=0 2024-11-23T06:38:58,700 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'df2f15951535,45461,1732343886561' ***** 2024-11-23T06:38:58,700 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T06:38:58,700 INFO [M:0;df2f15951535:45461 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:38:58,700 INFO [M:0;df2f15951535:45461 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:38:58,700 DEBUG [M:0;df2f15951535:45461 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T06:38:58,700 DEBUG [M:0;df2f15951535:45461 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T06:38:58,700 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
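The "Chore service for: ... had [ScheduledChore name=..., period=..., unit=MILLISECONDS] on shutdown" lines above list the periodic background tasks being cancelled as the servers stop. A JDK-only analogy of that idea (named periodic tasks on one scheduler, all cancelled together at shutdown); this is an illustration, not HBase's ChoreService/ScheduledChore API:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/** Analogy only: periodic "chores" on a shared scheduler, cancelled as a
 *  group on shutdown, in the spirit of the ChoreService entries logged above. */
public class ChoreLikeScheduler {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
    // periods mirror the values in the log: 60000 ms and 300000 ms
    pool.scheduleAtFixedRate(() -> System.out.println("tune compaction throughput"),
        0, 60_000, TimeUnit.MILLISECONDS);
    pool.scheduleAtFixedRate(() -> System.out.println("report replication stats"),
        0, 300_000, TimeUnit.MILLISECONDS);
    Thread.sleep(100);
    pool.shutdownNow();                       // cancel everything "on shutdown"
    pool.awaitTermination(5, TimeUnit.SECONDS);
  }
}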
2024-11-23T06:38:58,700 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343887227 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343887227,5,FailOnTimeoutGroup] 2024-11-23T06:38:58,700 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343887227 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343887227,5,FailOnTimeoutGroup] 2024-11-23T06:38:58,700 INFO [M:0;df2f15951535:45461 {}] hbase.ChoreService(370): Chore service for: master/df2f15951535:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T06:38:58,701 INFO [M:0;df2f15951535:45461 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:38:58,701 DEBUG [M:0;df2f15951535:45461 {}] master.HMaster(1795): Stopping service threads 2024-11-23T06:38:58,701 INFO [M:0;df2f15951535:45461 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T06:38:58,701 INFO [M:0;df2f15951535:45461 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:38:58,701 INFO [M:0;df2f15951535:45461 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T06:38:58,701 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T06:38:58,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T06:38:58,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:38:58,711 DEBUG [M:0;df2f15951535:45461 {}] zookeeper.ZKUtil(347): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T06:38:58,711 WARN [M:0;df2f15951535:45461 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T06:38:58,711 INFO [M:0;df2f15951535:45461 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/.lastflushedseqids 2024-11-23T06:38:58,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741849_1025 (size=130) 2024-11-23T06:38:58,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741849_1025 (size=130) 2024-11-23T06:38:58,717 INFO [M:0;df2f15951535:45461 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T06:38:58,717 INFO [M:0;df2f15951535:45461 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T06:38:58,717 DEBUG [M:0;df2f15951535:45461 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:38:58,717 INFO [M:0;df2f15951535:45461 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:38:58,717 DEBUG [M:0;df2f15951535:45461 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:38:58,717 DEBUG [M:0;df2f15951535:45461 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:38:58,717 DEBUG [M:0;df2f15951535:45461 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:38:58,717 INFO [M:0;df2f15951535:45461 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.55 KB heapSize=54.94 KB 2024-11-23T06:38:58,734 DEBUG [M:0;df2f15951535:45461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/29e9ff973566492d9d3770964bab3274 is 82, key is hbase:meta,,1/info:regioninfo/1732343887921/Put/seqid=0 2024-11-23T06:38:58,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741850_1026 (size=5672) 2024-11-23T06:38:58,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741850_1026 (size=5672) 2024-11-23T06:38:58,739 INFO [M:0;df2f15951535:45461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/29e9ff973566492d9d3770964bab3274 2024-11-23T06:38:58,763 DEBUG [M:0;df2f15951535:45461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5fc1f94b8aec40ebac94ab9a29b7b9c7 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732343888481/Put/seqid=0 2024-11-23T06:38:58,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741851_1027 (size=7819) 2024-11-23T06:38:58,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741851_1027 (size=7819) 2024-11-23T06:38:58,768 INFO [M:0;df2f15951535:45461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5fc1f94b8aec40ebac94ab9a29b7b9c7 2024-11-23T06:38:58,772 INFO [M:0;df2f15951535:45461 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5fc1f94b8aec40ebac94ab9a29b7b9c7 2024-11-23T06:38:58,787 DEBUG [M:0;df2f15951535:45461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08160df9ed3c48a2a0b7629b8d383729 is 69, key is df2f15951535,41363,1732343886767/rs:state/1732343887350/Put/seqid=0 
2024-11-23T06:38:58,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:38:58,789 INFO [RS:0;df2f15951535:41363 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:38:58,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41363-0x1016669ee7e0001, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:38:58,790 INFO [RS:0;df2f15951535:41363 {}] regionserver.HRegionServer(1031): Exiting; stopping=df2f15951535,41363,1732343886767; zookeeper connection closed. 2024-11-23T06:38:58,790 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7ee8e1f9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7ee8e1f9 2024-11-23T06:38:58,790 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T06:38:58,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741852_1028 (size=5156) 2024-11-23T06:38:58,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741852_1028 (size=5156) 2024-11-23T06:38:58,793 INFO [M:0;df2f15951535:45461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08160df9ed3c48a2a0b7629b8d383729 2024-11-23T06:38:58,811 DEBUG [M:0;df2f15951535:45461 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8c18d045a0d7486fa40a8ab5f2b19251 is 52, key is load_balancer_on/state:d/1732343888101/Put/seqid=0 2024-11-23T06:38:58,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741853_1029 (size=5056) 2024-11-23T06:38:58,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741853_1029 (size=5056) 2024-11-23T06:38:58,815 INFO [M:0;df2f15951535:45461 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8c18d045a0d7486fa40a8ab5f2b19251 2024-11-23T06:38:58,820 DEBUG [M:0;df2f15951535:45461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/29e9ff973566492d9d3770964bab3274 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/29e9ff973566492d9d3770964bab3274 2024-11-23T06:38:58,825 INFO [M:0;df2f15951535:45461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/29e9ff973566492d9d3770964bab3274, entries=8, sequenceid=121, filesize=5.5 K 2024-11-23T06:38:58,826 DEBUG [M:0;df2f15951535:45461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5fc1f94b8aec40ebac94ab9a29b7b9c7 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5fc1f94b8aec40ebac94ab9a29b7b9c7 2024-11-23T06:38:58,830 INFO [M:0;df2f15951535:45461 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5fc1f94b8aec40ebac94ab9a29b7b9c7 2024-11-23T06:38:58,830 INFO [M:0;df2f15951535:45461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5fc1f94b8aec40ebac94ab9a29b7b9c7, entries=14, sequenceid=121, filesize=7.6 K 2024-11-23T06:38:58,831 DEBUG [M:0;df2f15951535:45461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08160df9ed3c48a2a0b7629b8d383729 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/08160df9ed3c48a2a0b7629b8d383729 2024-11-23T06:38:58,836 INFO [M:0;df2f15951535:45461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/08160df9ed3c48a2a0b7629b8d383729, entries=1, sequenceid=121, filesize=5.0 K 2024-11-23T06:38:58,837 DEBUG [M:0;df2f15951535:45461 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8c18d045a0d7486fa40a8ab5f2b19251 as hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8c18d045a0d7486fa40a8ab5f2b19251 2024-11-23T06:38:58,843 INFO [M:0;df2f15951535:45461 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35207/user/jenkins/test-data/08d3ca3a-5677-eddd-09dd-fd8e439cea7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8c18d045a0d7486fa40a8ab5f2b19251, entries=1, sequenceid=121, filesize=4.9 K 2024-11-23T06:38:58,844 INFO [M:0;df2f15951535:45461 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.55 KB/44599, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=121, compaction requested=false 2024-11-23T06:38:58,845 INFO [M:0;df2f15951535:45461 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
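The "Committing .../.tmp/<family>/<file> as .../<family>/<file>" DEBUG lines above (for both hbase:meta and the master's local store) show flushed output being written under a temporary directory and then moved into the store directory with a single rename. A minimal sketch of that pattern using only the plain Hadoop FileSystem API; the paths and the commitFlushedFile helper are illustrative, not the HRegionFileSystem implementation:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

/** Sketch of the write-to-.tmp-then-rename pattern visible in the
 *  "Committing ... as ..." log lines; not the actual HBase code path. */
public class TmpThenCommit {
  static Path commitFlushedFile(FileSystem fs, Path tmpFile, Path storeDir)
      throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // The flushed file only becomes visible under the store directory once
    // the rename succeeds, so readers never observe a half-written file.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);               // local FS unless configured for HDFS
    Path tmp = new Path("/tmp/region/.tmp/info/abc123"); // illustrative paths only
    Path store = new Path("/tmp/region/info");
    commitFlushedFile(fs, tmp, store);
  }
}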
2024-11-23T06:38:58,845 DEBUG [M:0;df2f15951535:45461 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343938717Disabling compacts and flushes for region at 1732343938717Disabling writes for close at 1732343938717Obtaining lock to block concurrent updates at 1732343938717Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732343938717Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44599, getHeapSize=56192, getOffHeapSize=0, getCellsCount=140 at 1732343938718 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732343938718Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732343938719 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732343938734 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732343938734Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732343938743 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732343938762 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732343938762Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732343938772 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732343938787 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732343938787Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732343938797 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732343938810 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732343938810Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1116fab5: reopening flushed file at 1732343938820 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68c86d53: reopening flushed file at 1732343938825 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39e05ea0: reopening flushed file at 1732343938831 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@177170fb: reopening flushed file at 1732343938836 (+5 ms)Finished flush of dataSize ~43.55 KB/44599, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=121, compaction requested=false at 1732343938844 (+8 ms)Writing region close event to WAL at 1732343938845 (+1 ms)Closed at 1732343938845 2024-11-23T06:38:58,846 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,846 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,846 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,846 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,846 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:38:58,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35075 is added to blk_1073741830_1006 (size=52996) 2024-11-23T06:38:58,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44847 is added to blk_1073741830_1006 (size=52996) 2024-11-23T06:38:58,848 INFO [M:0;df2f15951535:45461 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
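The repeated "wal.FSHLog$SyncRunner(477): interrupted" INFO lines above are the WAL sync threads reacting to interruption during shutdown. A minimal JDK sketch of that style of worker (block waiting for work, treat interrupt as the exit signal); purely illustrative, not FSHLog's SyncRunner:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

/** Illustration of an interrupt-driven worker loop, in the spirit of the
 *  "SyncRunner ... interrupted" messages logged above; not HBase code. */
public class InterruptibleSyncWorker implements Runnable {
  private final BlockingQueue<Runnable> work = new LinkedBlockingQueue<>();

  @Override public void run() {
    try {
      while (!Thread.currentThread().isInterrupted()) {
        work.take().run();                   // blocks until work arrives
      }
    } catch (InterruptedException e) {
      System.out.println("interrupted");     // mirrors the log's exit message
      Thread.currentThread().interrupt();    // restore the flag and fall through
    }
  }

  public static void main(String[] args) throws InterruptedException {
    Thread t = new Thread(new InterruptibleSyncWorker(), "sync.0");
    t.start();
    Thread.sleep(50);
    t.interrupt();                           // shutdown path: interrupt and join
    t.join();
  }
}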
2024-11-23T06:38:58,848 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T06:38:58,848 INFO [M:0;df2f15951535:45461 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45461 2024-11-23T06:38:58,849 INFO [M:0;df2f15951535:45461 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:38:58,958 INFO [M:0;df2f15951535:45461 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:38:58,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:38:58,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45461-0x1016669ee7e0000, quorum=127.0.0.1:54875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:38:58,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7009eb0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:38:58,960 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38d3f6f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:38:58,960 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:38:58,961 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e2a30ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:38:58,961 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f411ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/hadoop.log.dir/,STOPPED} 2024-11-23T06:38:58,963 WARN [BP-792531804-172.17.0.3-1732343884264 heartbeating to localhost/127.0.0.1:35207 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:38:58,963 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:38:58,963 WARN [BP-792531804-172.17.0.3-1732343884264 heartbeating to localhost/127.0.0.1:35207 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-792531804-172.17.0.3-1732343884264 (Datanode Uuid a204d104-e9a2-49bb-b0af-9590785b25e1) service to localhost/127.0.0.1:35207 2024-11-23T06:38:58,963 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:38:58,963 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/data/data3/current/BP-792531804-172.17.0.3-1732343884264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:38:58,964 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/data/data4/current/BP-792531804-172.17.0.3-1732343884264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:38:58,964 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:38:58,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@278dab99{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:38:58,969 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a6c1f86{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:38:58,970 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:38:58,970 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a2ef153{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:38:58,970 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74b5ebca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/hadoop.log.dir/,STOPPED} 2024-11-23T06:38:58,971 WARN [BP-792531804-172.17.0.3-1732343884264 heartbeating to localhost/127.0.0.1:35207 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:38:58,971 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:38:58,971 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:38:58,971 WARN [BP-792531804-172.17.0.3-1732343884264 heartbeating to localhost/127.0.0.1:35207 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-792531804-172.17.0.3-1732343884264 (Datanode Uuid 31c67cb7-da48-414d-838c-01ef83274af3) service to localhost/127.0.0.1:35207 2024-11-23T06:38:58,972 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/data/data1/current/BP-792531804-172.17.0.3-1732343884264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:38:58,972 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/cluster_ac6108b2-9839-3dd4-f170-d5c1936c3319/data/data2/current/BP-792531804-172.17.0.3-1732343884264 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:38:58,972 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:38:58,978 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@611cc51f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:38:58,978 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@77877788{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:38:58,979 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:38:58,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8d4c846{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:38:58,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc3ea71{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/hadoop.log.dir/,STOPPED} 2024-11-23T06:38:58,984 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T06:38:59,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T06:38:59,011 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 180) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35207 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35207 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35207 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35207 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35207 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35207 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35207 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:35207 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35207 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=486 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=101 (was 117), ProcessCount=11 (was 11), AvailableMemoryMB=7223 (was 7640) 2024-11-23T06:38:59,018 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=486, MaxFileDescriptor=1048576, SystemLoadAverage=101, ProcessCount=11, AvailableMemoryMB=7223 2024-11-23T06:38:59,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T06:38:59,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/hadoop.log.dir so I do NOT create it in target/test-data/61166cb9-2035-71e0-5f9c-15075530574c 2024-11-23T06:38:59,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c0b8d806-de7c-1a46-3238-3d19bc08b2a3/hadoop.tmp.dir so I do NOT create it in target/test-data/61166cb9-2035-71e0-5f9c-15075530574c 2024-11-23T06:38:59,018 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0, deleteOnExit=true 2024-11-23T06:38:59,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/test.cache.data in system properties and HBase conf 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/hadoop.log.dir in system properties and HBase conf 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read 
short circuit is OFF 2024-11-23T06:38:59,019 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:38:59,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T06:38:59,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T06:38:59,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:38:59,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:38:59,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T06:38:59,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/nfs.dump.dir in system properties and HBase conf 2024-11-23T06:38:59,020 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/java.io.tmpdir in system properties and HBase conf 2024-11-23T06:38:59,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:38:59,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T06:38:59,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T06:38:59,033 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:38:59,374 INFO [regionserver/df2f15951535:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:38:59,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:59,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:38:59,503 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:38:59,507 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:38:59,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:38:59,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:38:59,508 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:38:59,508 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:38:59,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f8818bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:38:59,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75b3fca0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:38:59,602 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e6f692{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/java.io.tmpdir/jetty-localhost-38971-hadoop-hdfs-3_4_1-tests_jar-_-any-8395152496219648980/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:38:59,602 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45bda0cb{HTTP/1.1, (http/1.1)}{localhost:38971} 2024-11-23T06:38:59,602 INFO [Time-limited test {}] server.Server(415): Started @245599ms 2024-11-23T06:38:59,613 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:38:59,893 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:38:59,896 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:38:59,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:38:59,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:38:59,897 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:38:59,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1724ca70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:38:59,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2001df3c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:38:59,991 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49059d65{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/java.io.tmpdir/jetty-localhost-32867-hadoop-hdfs-3_4_1-tests_jar-_-any-7695168788488445303/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:38:59,991 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@417c1a7a{HTTP/1.1, (http/1.1)}{localhost:32867} 2024-11-23T06:38:59,991 INFO [Time-limited test {}] server.Server(415): Started @245988ms 2024-11-23T06:38:59,992 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:39:00,017 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:39:00,020 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:39:00,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:39:00,021 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:39:00,021 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:39:00,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70aed17c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:39:00,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@692ba77d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:39:00,116 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f81fda7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/java.io.tmpdir/jetty-localhost-43791-hadoop-hdfs-3_4_1-tests_jar-_-any-9238829413627956600/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:39:00,116 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41c54b7e{HTTP/1.1, (http/1.1)}{localhost:43791} 2024-11-23T06:39:00,116 INFO [Time-limited test {}] server.Server(415): Started @246113ms 2024-11-23T06:39:00,117 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:39:00,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:00,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:00,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:39:00,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T06:39:00,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T06:39:00,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T06:39:01,294 WARN [Thread-1959 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/data/data1/current/BP-1218950709-172.17.0.3-1732343939037/current, will proceed with Du for space computation calculation, 2024-11-23T06:39:01,294 WARN [Thread-1960 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/data/data2/current/BP-1218950709-172.17.0.3-1732343939037/current, will proceed with Du for space computation calculation, 2024-11-23T06:39:01,311 WARN [Thread-1923 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T06:39:01,313 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5c60080025ec8004 with lease ID 0x61bc74611349acd0: Processing first storage report for DS-f9655dbd-a965-43a0-b1e1-04e9ad17933c from datanode DatanodeRegistration(127.0.0.1:41329, datanodeUuid=fde251bd-1f37-478b-8f4e-c99e189d5067, infoPort=36649, infoSecurePort=0, ipcPort=44633, storageInfo=lv=-57;cid=testClusterID;nsid=785371352;c=1732343939037) 2024-11-23T06:39:01,313 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c60080025ec8004 with lease ID 0x61bc74611349acd0: from storage DS-f9655dbd-a965-43a0-b1e1-04e9ad17933c node DatanodeRegistration(127.0.0.1:41329, datanodeUuid=fde251bd-1f37-478b-8f4e-c99e189d5067, infoPort=36649, infoSecurePort=0, ipcPort=44633, storageInfo=lv=-57;cid=testClusterID;nsid=785371352;c=1732343939037), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:39:01,313 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5c60080025ec8004 with lease ID 0x61bc74611349acd0: Processing first storage report for DS-be039fe1-77eb-49de-8812-18ac3ee6a6ae from datanode DatanodeRegistration(127.0.0.1:41329, datanodeUuid=fde251bd-1f37-478b-8f4e-c99e189d5067, infoPort=36649, infoSecurePort=0, ipcPort=44633, storageInfo=lv=-57;cid=testClusterID;nsid=785371352;c=1732343939037) 2024-11-23T06:39:01,313 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c60080025ec8004 with lease ID 0x61bc74611349acd0: from storage DS-be039fe1-77eb-49de-8812-18ac3ee6a6ae node DatanodeRegistration(127.0.0.1:41329, datanodeUuid=fde251bd-1f37-478b-8f4e-c99e189d5067, infoPort=36649, infoSecurePort=0, ipcPort=44633, storageInfo=lv=-57;cid=testClusterID;nsid=785371352;c=1732343939037), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:39:01,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:01,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:01,454 WARN [Thread-1970 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/data/data3/current/BP-1218950709-172.17.0.3-1732343939037/current, will proceed with Du for space computation calculation, 2024-11-23T06:39:01,455 WARN [Thread-1971 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/data/data4/current/BP-1218950709-172.17.0.3-1732343939037/current, will proceed with Du for space computation calculation, 2024-11-23T06:39:01,475 WARN [Thread-1946 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:39:01,477 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x65b999df5e912e8b with lease ID 0x61bc74611349acd1: Processing first storage report for DS-47ca2eff-fef7-4647-9aba-e6e2d9b68143 from datanode DatanodeRegistration(127.0.0.1:46521, datanodeUuid=1df8e296-d8db-4b3f-9726-c0c67368dab9, infoPort=37563, infoSecurePort=0, ipcPort=44247, storageInfo=lv=-57;cid=testClusterID;nsid=785371352;c=1732343939037) 2024-11-23T06:39:01,477 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x65b999df5e912e8b with lease ID 0x61bc74611349acd1: from storage DS-47ca2eff-fef7-4647-9aba-e6e2d9b68143 node DatanodeRegistration(127.0.0.1:46521, datanodeUuid=1df8e296-d8db-4b3f-9726-c0c67368dab9, infoPort=37563, infoSecurePort=0, ipcPort=44247, storageInfo=lv=-57;cid=testClusterID;nsid=785371352;c=1732343939037), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:39:01,477 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x65b999df5e912e8b with lease ID 0x61bc74611349acd1: Processing first storage report for DS-552ab60c-faf0-4b59-99b8-8af508187b06 from datanode DatanodeRegistration(127.0.0.1:46521, datanodeUuid=1df8e296-d8db-4b3f-9726-c0c67368dab9, infoPort=37563, infoSecurePort=0, ipcPort=44247, storageInfo=lv=-57;cid=testClusterID;nsid=785371352;c=1732343939037) 2024-11-23T06:39:01,477 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x65b999df5e912e8b with lease ID 0x61bc74611349acd1: from storage DS-552ab60c-faf0-4b59-99b8-8af508187b06 node DatanodeRegistration(127.0.0.1:46521, datanodeUuid=1df8e296-d8db-4b3f-9726-c0c67368dab9, infoPort=37563, infoSecurePort=0, ipcPort=44247, storageInfo=lv=-57;cid=testClusterID;nsid=785371352;c=1732343939037), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:39:01,549 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c 2024-11-23T06:39:01,552 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/zookeeper_0, clientPort=56103, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T06:39:01,553 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56103 2024-11-23T06:39:01,554 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:01,555 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:01,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:39:01,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:39:01,566 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5 with version=8 2024-11-23T06:39:01,566 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/hbase-staging 2024-11-23T06:39:01,569 INFO [Time-limited test {}] client.ConnectionUtils(128): master/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:39:01,569 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:01,569 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:01,569 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:39:01,569 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:01,569 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:39:01,569 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T06:39:01,569 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:39:01,570 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44159 2024-11-23T06:39:01,573 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44159 connecting to ZooKeeper ensemble=127.0.0.1:56103 2024-11-23T06:39:01,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:441590x0, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:39:01,651 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44159-0x101666ac55b0000 connected 2024-11-23T06:39:01,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:01,743 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:01,745 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:39:01,745 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5, hbase.cluster.distributed=false 2024-11-23T06:39:01,747 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:39:01,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44159 2024-11-23T06:39:01,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44159 2024-11-23T06:39:01,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44159 2024-11-23T06:39:01,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44159 2024-11-23T06:39:01,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44159 2024-11-23T06:39:01,765 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:39:01,765 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:01,765 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:01,765 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:39:01,765 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:01,765 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:39:01,765 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T06:39:01,765 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:39:01,766 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34775 2024-11-23T06:39:01,767 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34775 connecting to ZooKeeper ensemble=127.0.0.1:56103 2024-11-23T06:39:01,768 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:01,769 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:01,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:347750x0, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:39:01,780 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:39:01,780 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34775-0x101666ac55b0001 connected 2024-11-23T06:39:01,780 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T06:39:01,781 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T06:39:01,781 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T06:39:01,782 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:39:01,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34775 2024-11-23T06:39:01,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34775 2024-11-23T06:39:01,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34775 2024-11-23T06:39:01,784 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34775 2024-11-23T06:39:01,784 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34775 
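
The repeated Close-WAL-Writer-0 warnings above come from RecoverLeaseFSUtils polling DistributedFileSystem.isFileClosed (via reflection) for WAL files of the previous test, apparently after the underlying FileSystem has already been closed, which is why every attempt ends in java.io.IOException: Filesystem closed at DFSClient.checkOpen. Below is a minimal sketch of that lease-recovery loop written against the public HDFS API, calling recoverLease and isFileClosed directly rather than reflectively; the path argument and the timeout are illustrative and not taken from the log.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Illustrative sketch (not HBase's RecoverLeaseFSUtils): ask the NameNode to
 * recover the lease on an abandoned WAL file, then poll isFileClosed() until
 * the last block is finalized. If the FileSystem has already been closed, as
 * in the warnings above, isFileClosed() throws "Filesystem closed" instead.
 */
public class WalLeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path(args[0]);                  // hypothetical WAL path passed on the command line
    FileSystem fs = FileSystem.get(wal.toUri(), conf);
    if (!(fs instanceof DistributedFileSystem)) {
      System.out.println("Not HDFS; nothing to recover");
      return;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    boolean recovered = dfs.recoverLease(wal);     // true if the lease was recovered immediately
    long deadline = System.currentTimeMillis() + 60_000L;
    while (!recovered && System.currentTimeMillis() < deadline) {
      try {
        recovered = dfs.isFileClosed(wal);         // has the NameNode closed the file yet?
      } catch (IOException e) {
        // "Filesystem closed" lands here when the client has already been shut down.
        System.out.println("isFileClosed failed: " + e.getMessage());
        break;
      }
      if (!recovered) {
        Thread.sleep(1_000L);                      // back off before polling again
      }
    }
    System.out.println("lease recovered: " + recovered);
  }
}
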
2024-11-23T06:39:01,798 DEBUG [M:0;df2f15951535:44159 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;df2f15951535:44159 2024-11-23T06:39:01,798 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/df2f15951535,44159,1732343941568 2024-11-23T06:39:01,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:39:01,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:39:01,812 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/df2f15951535,44159,1732343941568 2024-11-23T06:39:01,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T06:39:01,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:01,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:01,823 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T06:39:01,823 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/df2f15951535,44159,1732343941568 from backup master directory 2024-11-23T06:39:01,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/df2f15951535,44159,1732343941568 2024-11-23T06:39:01,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:39:01,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:39:01,836 WARN [master/df2f15951535:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
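
The backup-master/active-master znode activity above (register under /hbase/backup-masters, watch /hbase/master, delete the backup entry once active) is ZooKeeper's usual ephemeral-znode election pattern. A conceptual sketch of that pattern with the plain ZooKeeper client API follows; it is not HBase's ActiveMasterManager, and the znode path, payload, and retry loop are illustrative only (the connect string reuses the mini-ZK client port reported above).

import java.nio.charset.StandardCharsets;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

/**
 * Conceptual sketch of ephemeral-znode leader election: every contender tries
 * to create the same ephemeral znode; the one that succeeds is the leader,
 * the others wait and retry when the znode disappears.
 */
public class MasterElectionSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56103", 30_000, event -> { });
    String masterZNode = "/demo-master";           // hypothetical znode, not /hbase/master
    byte[] myId = "host,port,startcode".getBytes(StandardCharsets.UTF_8);
    while (true) {
      try {
        // Ephemeral: the znode vanishes automatically if this session dies.
        zk.create(masterZNode, myId, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        System.out.println("registered as active master");
        break;
      } catch (KeeperException.NodeExistsException e) {
        // Someone else is active; in real code we would wait on the watch event.
        if (zk.exists(masterZNode, true) == null) {
          continue;                                // it vanished between create() and exists()
        }
        System.out.println("standing by as backup master");
        Thread.sleep(5_000L);                      // crude retry for the sketch
      }
    }
    zk.close();
  }
}
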
2024-11-23T06:39:01,836 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=df2f15951535,44159,1732343941568 2024-11-23T06:39:01,840 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/hbase.id] with ID: decfba6a-3101-4b4f-bc16-6b7e0703a63a 2024-11-23T06:39:01,840 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/.tmp/hbase.id 2024-11-23T06:39:01,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:39:01,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:39:01,846 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/.tmp/hbase.id]:[hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/hbase.id] 2024-11-23T06:39:01,857 INFO [master/df2f15951535:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:01,857 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T06:39:01,858 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
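
The cluster ID file above is first written to a .tmp location and then moved to its final name, so readers either see a complete hbase.id or none at all. A small sketch of that write-then-rename pattern with the Hadoop FileSystem API is below; the root directory argument and the raw-UUID payload are illustrative, and the on-disk format HBase actually uses for hbase.id may differ.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Sketch of publishing a small file via write-to-temp-then-rename: the content
 * only appears at the final path once it is complete, because the rename is a
 * single metadata operation on HDFS.
 */
public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path(args[0]);              // e.g. the root directory of a test cluster
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path idFile = new Path(rootDir, "hbase.id");

    String clusterId = UUID.randomUUID().toString();
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, idFile)) {                 // publish the finished file in one step
      throw new IOException("rename failed: " + tmp + " -> " + idFile);
    }
    System.out.println("cluster ID " + clusterId + " published at " + idFile);
  }
}
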
2024-11-23T06:39:01,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:01,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:01,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:39:01,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:39:01,891 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:39:01,892 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T06:39:01,892 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:39:01,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:39:01,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:39:01,899 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store 2024-11-23T06:39:01,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:39:01,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:39:02,307 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:02,307 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:39:02,307 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:02,307 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:02,307 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:39:02,307 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:02,307 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
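
The master:store descriptor logged above lists four column families (info, proc, rs, state), each with its own versions, block size, bloom filter, and encoding. The sketch below expresses just the 'info' family's logged settings (3 versions, in-memory, 8 KB blocks, ROWCOL bloom filter, ROW_INDEX_V1 encoding) with the public descriptor builders; the table name is hypothetical, and the real master:store region is created internally by MasterRegion rather than through this client-side API.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Sketch of building a column family equivalent to the 'info' family shown in
 * the master:store log entry above, wrapped in an illustrative table descriptor.
 */
public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                          // VERSIONS => '3'
        .setInMemory(true)                          // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                     // BLOCKSIZE => '8192 B (8KB)'
        .setBloomFilterType(BloomType.ROWCOL)       // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();

    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "store"))   // hypothetical name, not master:store
        .setColumnFamily(info)
        .build();

    System.out.println(store);
  }
}
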
2024-11-23T06:39:02,307 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343942307Disabling compacts and flushes for region at 1732343942307Disabling writes for close at 1732343942307Writing region close event to WAL at 1732343942307Closed at 1732343942307 2024-11-23T06:39:02,309 WARN [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/.initializing 2024-11-23T06:39:02,309 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/WALs/df2f15951535,44159,1732343941568 2024-11-23T06:39:02,313 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C44159%2C1732343941568, suffix=, logDir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/WALs/df2f15951535,44159,1732343941568, archiveDir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/oldWALs, maxLogs=10 2024-11-23T06:39:02,314 INFO [master/df2f15951535:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C44159%2C1732343941568.1732343942313 2024-11-23T06:39:02,320 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/WALs/df2f15951535,44159,1732343941568/df2f15951535%2C44159%2C1732343941568.1732343942313 2024-11-23T06:39:02,321 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37563:37563),(127.0.0.1/127.0.0.1:36649:36649)] 2024-11-23T06:39:02,322 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:39:02,323 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:02,323 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,323 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,324 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T06:39:02,327 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:02,328 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:02,328 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,330 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T06:39:02,330 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:02,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:39:02,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,332 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T06:39:02,332 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:02,333 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:39:02,333 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T06:39:02,334 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:02,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:39:02,334 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,335 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,335 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,336 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,336 DEBUG [master/df2f15951535:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,336 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T06:39:02,337 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:02,339 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:39:02,339 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812643, jitterRate=0.03332936763763428}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T06:39:02,340 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732343942323Initializing all the Stores at 1732343942324 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343942324Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343942324Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343942324Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343942324Cleaning up temporary data from old regions at 1732343942336 (+12 ms)Region opened successfully at 1732343942340 (+4 ms) 2024-11-23T06:39:02,340 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T06:39:02,342 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@668dd221, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:39:02,343 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T06:39:02,343 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T06:39:02,343 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T06:39:02,343 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T06:39:02,344 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T06:39:02,344 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T06:39:02,344 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T06:39:02,346 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T06:39:02,346 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T06:39:02,358 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T06:39:02,359 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T06:39:02,359 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T06:39:02,369 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T06:39:02,369 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T06:39:02,370 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T06:39:02,379 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T06:39:02,380 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T06:39:02,390 DEBUG 
[master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-23T06:39:02,392 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-23T06:39:02,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T06:39:02,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T06:39:02,400 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-23T06:39:02,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-23T06:39:02,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-23T06:39:02,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T06:39:02,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T06:39:02,412 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=df2f15951535,44159,1732343941568, sessionid=0x101666ac55b0000, setting cluster-up flag (Was=false)
2024-11-23T06:39:02,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T06:39:02,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event,
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:02,468 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T06:39:02,469 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,44159,1732343941568 2024-11-23T06:39:02,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:02,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:02,542 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T06:39:02,543 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,44159,1732343941568 2024-11-23T06:39:02,545 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T06:39:02,547 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T06:39:02,547 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T06:39:02,547 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
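The StochasticLoadBalancer entry above lists its cost functions and reports the sum of their multipliers as 0.0 for this test run. As a rough illustration of how such multiplier-weighted costs are typically combined (a sketch, not the balancer's actual code):

    // Illustrative only: total cost as the multiplier-weighted average of
    // per-function costs, each assumed to be normalized to [0, 1].
    static double weightedCost(double[] multipliers, double[] costs) {
        double sumWeights = 0.0, sum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            sumWeights += multipliers[i];
            sum += multipliers[i] * costs[i];
        }
        // With a multiplier sum of 0.0, as logged above, the weighted total is
        // degenerate and no candidate plan can look better than any other.
        return sumWeights == 0.0 ? 0.0 : sum / sumWeights;
    }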
2024-11-23T06:39:02,547 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: df2f15951535,44159,1732343941568 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T06:39:02,549 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:39:02,549 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:39:02,549 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:39:02,549 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:39:02,549 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/df2f15951535:0, corePoolSize=10, maxPoolSize=10 2024-11-23T06:39:02,549 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,550 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:39:02,550 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,551 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732343972551 2024-11-23T06:39:02,551 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T06:39:02,551 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T06:39:02,551 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T06:39:02,551 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T06:39:02,551 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T06:39:02,551 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T06:39:02,551 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,552 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:39:02,552 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T06:39:02,552 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T06:39:02,552 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T06:39:02,552 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T06:39:02,552 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T06:39:02,552 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T06:39:02,553 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343942553,5,FailOnTimeoutGroup] 2024-11-23T06:39:02,553 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343942553,5,FailOnTimeoutGroup] 2024-11-23T06:39:02,553 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,553 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T06:39:02,553 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,553 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
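The ScheduledChore entries above (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms) are periodic background tasks. A bare-bones stand-in using the JDK scheduler, shown only to make the period/unit pairing concrete; HBase's own ChoreService is not involved here:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) {
            ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
            // Comparable to "name=LogsCleaner, period=600000, unit=MILLISECONDS":
            // run a cleanup task every 600,000 ms.
            chorePool.scheduleAtFixedRate(
                () -> System.out.println("scanning oldWALs for deletable files..."), // placeholder body
                600_000, 600_000, TimeUnit.MILLISECONDS);
        }
    }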
2024-11-23T06:39:02,553 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:02,553 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T06:39:02,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:39:02,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:39:02,561 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T06:39:02,561 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5 2024-11-23T06:39:02,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:39:02,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:39:02,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:02,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:39:02,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:39:02,570 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:02,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:02,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:39:02,572 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:39:02,572 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:02,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:02,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:39:02,573 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:39:02,573 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:02,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:02,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:39:02,575 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:39:02,575 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:02,575 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:02,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:39:02,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740 2024-11-23T06:39:02,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740 2024-11-23T06:39:02,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:39:02,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:39:02,577 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T06:39:02,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:39:02,580 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:39:02,580 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778372, jitterRate=-0.010249033570289612}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:39:02,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732343942568Initializing all the Stores at 1732343942569 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343942569Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343942569Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343942569Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343942569Cleaning up temporary data from old regions at 1732343942577 (+8 ms)Region opened successfully at 1732343942580 (+3 ms) 2024-11-23T06:39:02,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:39:02,580 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:39:02,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:39:02,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:39:02,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:39:02,581 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:39:02,581 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343942580Disabling compacts and flushes for region at 1732343942580Disabling writes for close at 1732343942580Writing region close event to WAL at 1732343942581 (+1 ms)Closed at 1732343942581 2024-11-23T06:39:02,582 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:39:02,582 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T06:39:02,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T06:39:02,583 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:39:02,584 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T06:39:02,586 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(746): ClusterId : decfba6a-3101-4b4f-bc16-6b7e0703a63a 2024-11-23T06:39:02,586 DEBUG [RS:0;df2f15951535:34775 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T06:39:02,595 DEBUG [RS:0;df2f15951535:34775 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T06:39:02,595 DEBUG [RS:0;df2f15951535:34775 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T06:39:02,605 DEBUG [RS:0;df2f15951535:34775 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T06:39:02,606 DEBUG [RS:0;df2f15951535:34775 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44455fc3, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:39:02,616 DEBUG [RS:0;df2f15951535:34775 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;df2f15951535:34775 2024-11-23T06:39:02,616 INFO [RS:0;df2f15951535:34775 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T06:39:02,617 INFO [RS:0;df2f15951535:34775 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T06:39:02,617 DEBUG [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T06:39:02,617 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(2659): reportForDuty to master=df2f15951535,44159,1732343941568 with port=34775, startcode=1732343941765 2024-11-23T06:39:02,617 DEBUG [RS:0;df2f15951535:34775 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T06:39:02,619 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45857, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T06:39:02,620 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44159 {}] master.ServerManager(363): Checking decommissioned status of RegionServer df2f15951535,34775,1732343941765 2024-11-23T06:39:02,620 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44159 {}] master.ServerManager(517): Registering regionserver=df2f15951535,34775,1732343941765 2024-11-23T06:39:02,621 DEBUG [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5 2024-11-23T06:39:02,621 DEBUG [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46567 2024-11-23T06:39:02,621 DEBUG [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T06:39:02,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:39:02,633 DEBUG [RS:0;df2f15951535:34775 {}] zookeeper.ZKUtil(111): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/df2f15951535,34775,1732343941765 2024-11-23T06:39:02,633 WARN [RS:0;df2f15951535:34775 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
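The registration sequence above (reportForDuty, ServerManager registering the region server, then a watcher firing on /hbase/rs) amounts to the region server creating an ephemeral znode that the master watches. A bare-bones sketch with the plain ZooKeeper client; HBase wraps this in ZKWatcher/ZKUtil, and the quorum address and znode path below are copied from the log purely as examples:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsZNodeSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:56103", 30_000, event ->
                // The master reacts to NodeChildrenChanged on /hbase/rs,
                // much like the ZKWatcher events in the log.
                System.out.println("event: " + event.getType() + " on " + event.getPath()));
            // Watch the parent so a child change fires the watcher once.
            zk.getChildren("/hbase/rs", true);
            // The region server registers as an ephemeral child; the znode
            // disappears automatically if the server's session dies.
            zk.create("/hbase/rs/df2f15951535,34775,1732343941765", new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        }
    }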
2024-11-23T06:39:02,633 INFO [RS:0;df2f15951535:34775 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:39:02,633 DEBUG [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765 2024-11-23T06:39:02,633 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [df2f15951535,34775,1732343941765] 2024-11-23T06:39:02,636 INFO [RS:0;df2f15951535:34775 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T06:39:02,638 INFO [RS:0;df2f15951535:34775 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T06:39:02,638 INFO [RS:0;df2f15951535:34775 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T06:39:02,638 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,638 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T06:39:02,639 INFO [RS:0;df2f15951535:34775 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T06:39:02,639 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/df2f15951535:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:39:02,639 DEBUG [RS:0;df2f15951535:34775 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:39:02,640 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,640 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,640 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,640 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,640 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,640 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,34775,1732343941765-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:39:02,654 INFO [RS:0;df2f15951535:34775 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T06:39:02,654 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,34775,1732343941765-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,654 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:02,654 INFO [RS:0;df2f15951535:34775 {}] regionserver.Replication(171): df2f15951535,34775,1732343941765 started 2024-11-23T06:39:02,667 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
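The executor services listed above are named, bounded thread pools (RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1; RS_SNAPSHOT_OPERATIONS with 3/3, and so on). A rough JDK equivalent of one such pool, shown only to make the core/max figures concrete:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {
        public static void main(String[] args) {
            // Comparable to "RS_OPEN_REGION ... corePoolSize=1, maxPoolSize=1":
            // a single worker thread draining a queue of open-region tasks.
            ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
                1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            openRegionPool.submit(() -> System.out.println("open region task")); // placeholder task
            openRegionPool.shutdown();
        }
    }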
2024-11-23T06:39:02,667 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(1482): Serving as df2f15951535,34775,1732343941765, RpcServer on df2f15951535/172.17.0.3:34775, sessionid=0x101666ac55b0001 2024-11-23T06:39:02,668 DEBUG [RS:0;df2f15951535:34775 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T06:39:02,668 DEBUG [RS:0;df2f15951535:34775 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager df2f15951535,34775,1732343941765 2024-11-23T06:39:02,668 DEBUG [RS:0;df2f15951535:34775 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,34775,1732343941765' 2024-11-23T06:39:02,668 DEBUG [RS:0;df2f15951535:34775 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T06:39:02,668 DEBUG [RS:0;df2f15951535:34775 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T06:39:02,669 DEBUG [RS:0;df2f15951535:34775 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T06:39:02,669 DEBUG [RS:0;df2f15951535:34775 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T06:39:02,669 DEBUG [RS:0;df2f15951535:34775 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager df2f15951535,34775,1732343941765 2024-11-23T06:39:02,669 DEBUG [RS:0;df2f15951535:34775 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,34775,1732343941765' 2024-11-23T06:39:02,669 DEBUG [RS:0;df2f15951535:34775 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T06:39:02,669 DEBUG [RS:0;df2f15951535:34775 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T06:39:02,670 DEBUG [RS:0;df2f15951535:34775 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T06:39:02,670 INFO [RS:0;df2f15951535:34775 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T06:39:02,670 INFO [RS:0;df2f15951535:34775 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T06:39:02,734 WARN [df2f15951535:44159 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-23T06:39:02,772 INFO [RS:0;df2f15951535:34775 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C34775%2C1732343941765, suffix=, logDir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765, archiveDir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/oldWALs, maxLogs=32 2024-11-23T06:39:02,772 INFO [RS:0;df2f15951535:34775 {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C34775%2C1732343941765.1732343942772 2024-11-23T06:39:02,778 INFO [RS:0;df2f15951535:34775 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765/df2f15951535%2C34775%2C1732343941765.1732343942772 2024-11-23T06:39:02,779 DEBUG [RS:0;df2f15951535:34775 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36649:36649),(127.0.0.1/127.0.0.1:37563:37563)] 2024-11-23T06:39:02,984 DEBUG [df2f15951535:44159 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T06:39:02,986 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=df2f15951535,34775,1732343941765 2024-11-23T06:39:02,989 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,34775,1732343941765, state=OPENING 2024-11-23T06:39:03,047 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T06:39:03,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:03,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:03,068 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:39:03,068 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:39:03,068 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:39:03,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,34775,1732343941765}] 2024-11-23T06:39:03,220 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T06:39:03,222 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36765, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T06:39:03,226 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T06:39:03,226 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:39:03,228 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C34775%2C1732343941765.meta, suffix=.meta, logDir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765, archiveDir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/oldWALs, maxLogs=32 2024-11-23T06:39:03,228 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C34775%2C1732343941765.meta.1732343943228.meta 2024-11-23T06:39:03,233 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765/df2f15951535%2C34775%2C1732343941765.meta.1732343943228.meta 2024-11-23T06:39:03,234 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37563:37563),(127.0.0.1/127.0.0.1:36649:36649)] 2024-11-23T06:39:03,235 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:39:03,235 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T06:39:03,235 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T06:39:03,235 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
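
The two wal.AbstractFSWAL(613) entries above report the same sizing for the default WAL and the hbase:meta WAL: blocksize=256 MB, rollsize=128 MB, maxLogs=32. As a rough guide (this sketch is not part of the captured log, the class name WalSizingSketch is invented for illustration, and the key names are the commonly used FSHLog sizing knobs that should be confirmed against the 3.0.0-beta-2 defaults), those figures are usually driven by configuration along these lines:

    // Sketch only, not from the test source; verify the key names before relying on them.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size, reported above as "blocksize=256 MB"
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll threshold as a fraction of the block size: 0.5 * 256 MB = "rollsize=128 MB"
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Cap on outstanding WAL files before flushes are forced, reported as "maxLogs=32"
        conf.setInt("hbase.regionserver.maxlogs", 32);
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("WAL would roll at roughly " + rollSize + " bytes");
      }
    }
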
2024-11-23T06:39:03,235 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T06:39:03,235 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:03,235 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T06:39:03,235 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T06:39:03,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:39:03,237 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:39:03,237 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:03,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:03,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:39:03,238 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:39:03,239 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:03,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:03,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:39:03,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:39:03,240 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:03,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:03,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:39:03,241 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:39:03,241 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:03,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-23T06:39:03,241 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:39:03,242 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740 2024-11-23T06:39:03,243 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740 2024-11-23T06:39:03,244 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:39:03,244 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:39:03,244 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T06:39:03,246 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:39:03,246 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821373, jitterRate=0.04443073272705078}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:39:03,246 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T06:39:03,247 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732343943236Writing region info on filesystem at 1732343943236Initializing all the Stores at 1732343943236Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343943236Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343943236Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343943236Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343943236Cleaning up temporary data from old regions at 1732343943244 (+8 ms)Running coprocessor post-open hooks at 1732343943246 (+2 ms)Region opened successfully at 1732343943247 (+1 ms) 2024-11-23T06:39:03,248 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732343943220 2024-11-23T06:39:03,250 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T06:39:03,250 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T06:39:03,251 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,34775,1732343941765 2024-11-23T06:39:03,252 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,34775,1732343941765, state=OPEN 2024-11-23T06:39:03,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:39:03,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:39:03,299 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=df2f15951535,34775,1732343941765 2024-11-23T06:39:03,299 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:39:03,299 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:39:03,304 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T06:39:03,304 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,34775,1732343941765 in 231 msec 2024-11-23T06:39:03,309 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T06:39:03,309 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 722 msec 2024-11-23T06:39:03,310 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:39:03,311 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T06:39:03,312 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:39:03,312 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,34775,1732343941765, seqNum=-1] 2024-11-23T06:39:03,313 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:39:03,314 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41615, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:39:03,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 775 msec 2024-11-23T06:39:03,324 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732343943324, completionTime=-1 2024-11-23T06:39:03,324 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T06:39:03,324 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T06:39:03,326 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T06:39:03,326 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732344003326 2024-11-23T06:39:03,326 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732344063326 2024-11-23T06:39:03,326 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-23T06:39:03,326 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44159,1732343941568-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:03,326 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44159,1732343941568-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:03,326 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44159,1732343941568-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:03,326 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-df2f15951535:44159, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:39:03,326 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:03,327 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:03,328 DEBUG [master/df2f15951535:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T06:39:03,330 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.493sec 2024-11-23T06:39:03,330 INFO [master/df2f15951535:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T06:39:03,330 INFO [master/df2f15951535:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T06:39:03,330 INFO [master/df2f15951535:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T06:39:03,330 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T06:39:03,330 INFO [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T06:39:03,330 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44159,1732343941568-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:39:03,330 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44159,1732343941568-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T06:39:03,332 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T06:39:03,332 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T06:39:03,332 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,44159,1732343941568-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:39:03,387 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b74706c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:39:03,387 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request df2f15951535,44159,-1 for getting cluster id 2024-11-23T06:39:03,388 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T06:39:03,390 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'decfba6a-3101-4b4f-bc16-6b7e0703a63a' 2024-11-23T06:39:03,390 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T06:39:03,390 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "decfba6a-3101-4b4f-bc16-6b7e0703a63a" 2024-11-23T06:39:03,391 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@533a12fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:39:03,391 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [df2f15951535,44159,-1] 2024-11-23T06:39:03,391 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T06:39:03,392 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:03,393 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37432, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T06:39:03,394 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11088abd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:39:03,395 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:39:03,396 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,34775,1732343941765, seqNum=-1] 2024-11-23T06:39:03,397 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:39:03,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:03,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:03,398 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37518, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:39:03,400 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=df2f15951535,44159,1732343941568 2024-11-23T06:39:03,400 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:03,403 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T06:39:03,403 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T06:39:03,405 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is df2f15951535,44159,1732343941568 2024-11-23T06:39:03,405 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3d63bf95 2024-11-23T06:39:03,405 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T06:39:03,406 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37438, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T06:39:03,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44159 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-23T06:39:03,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44159 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
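
The TableDescriptorChecker warnings just above flag hbase.hregion.max.filesize=786432 and hbase.hregion.memstore.flush.size=8192, values far below production defaults. That is typical for a log-rolling test, which shrinks the thresholds so flushes, rolls and splits happen within seconds rather than hours. The snippet below is an illustrative sketch of how such a mini cluster might be configured, not the actual test code; the class name TinyThresholdCluster is invented, and the HBaseTestingUtil method names should be verified against the 3.0.0-beta-2 sources.

    // Sketch under stated assumptions: the real TestLogRolling setup is not shown in this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class TinyThresholdCluster {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB, splits early
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB, flushes constantly
        util.startMiniCluster(); // one master plus one region server, as in the log above
        try {
          // ... create tables and drive the write workload here ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
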
2024-11-23T06:39:03,407 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:39:03,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44159 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-23T06:39:03,411 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T06:39:03,411 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:03,411 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-23T06:39:03,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T06:39:03,412 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T06:39:03,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741835_1011 (size=381) 2024-11-23T06:39:03,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741835_1011 (size=381) 2024-11-23T06:39:03,420 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e7784e49abc2b30af0cdcae90903a5dc, NAME => 'TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5 2024-11-23T06:39:03,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741836_1012 (size=64) 2024-11-23T06:39:03,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741836_1012 (size=64) 2024-11-23T06:39:03,427 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:03,428 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing e7784e49abc2b30af0cdcae90903a5dc, disabling compactions & flushes 2024-11-23T06:39:03,428 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 2024-11-23T06:39:03,428 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 2024-11-23T06:39:03,428 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. after waiting 0 ms 2024-11-23T06:39:03,428 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 2024-11-23T06:39:03,428 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 2024-11-23T06:39:03,428 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for e7784e49abc2b30af0cdcae90903a5dc: Waiting for close lock at 1732343943428Disabling compacts and flushes for region at 1732343943428Disabling writes for close at 1732343943428Writing region close event to WAL at 1732343943428Closed at 1732343943428 2024-11-23T06:39:03,429 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T06:39:03,430 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732343943429"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732343943429"}]},"ts":"1732343943429"} 2024-11-23T06:39:03,432 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
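
The HMaster$4(2454) entry above records the client request that created 'TestLogRolling-testLogRolling' with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB blocks). An equivalent client-side call with the builder API would look roughly like the following; the real test code is not part of this log, so treat this as an approximation of the descriptor the master printed.

    // Illustrative sketch of an equivalent create; class name is invented for this example.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestLogRollingTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                  // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)', the default
              .build());
          admin.createTable(table.build());       // server side: CreateTableProcedure pid=4 above
        }
      }
    }

The CreateTableProcedure (pid=4) and its child assignment procedures (pid=5 and pid=6) in the surrounding entries are the server-side execution of exactly this kind of request.
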
2024-11-23T06:39:03,433 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T06:39:03,433 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732343943433"}]},"ts":"1732343943433"} 2024-11-23T06:39:03,435 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-23T06:39:03,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e7784e49abc2b30af0cdcae90903a5dc, ASSIGN}] 2024-11-23T06:39:03,437 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e7784e49abc2b30af0cdcae90903a5dc, ASSIGN 2024-11-23T06:39:03,437 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e7784e49abc2b30af0cdcae90903a5dc, ASSIGN; state=OFFLINE, location=df2f15951535,34775,1732343941765; forceNewPlan=false, retain=false 2024-11-23T06:39:03,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,493 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,493 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,516 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,516 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,516 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,516 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,516 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:03,588 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e7784e49abc2b30af0cdcae90903a5dc, regionState=OPENING, regionLocation=df2f15951535,34775,1732343941765 2024-11-23T06:39:03,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e7784e49abc2b30af0cdcae90903a5dc, ASSIGN because future has completed 2024-11-23T06:39:03,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e7784e49abc2b30af0cdcae90903a5dc, server=df2f15951535,34775,1732343941765}] 2024-11-23T06:39:03,752 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 
2024-11-23T06:39:03,753 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e7784e49abc2b30af0cdcae90903a5dc, NAME => 'TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:39:03,753 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,753 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:03,753 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,753 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,755 INFO [StoreOpener-e7784e49abc2b30af0cdcae90903a5dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,758 INFO [StoreOpener-e7784e49abc2b30af0cdcae90903a5dc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7784e49abc2b30af0cdcae90903a5dc columnFamilyName info 2024-11-23T06:39:03,758 DEBUG [StoreOpener-e7784e49abc2b30af0cdcae90903a5dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:03,758 INFO [StoreOpener-e7784e49abc2b30af0cdcae90903a5dc-1 {}] regionserver.HStore(327): Store=e7784e49abc2b30af0cdcae90903a5dc/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:39:03,759 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,760 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,760 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,761 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,761 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,764 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,767 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:39:03,767 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e7784e49abc2b30af0cdcae90903a5dc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722519, jitterRate=-0.08127044141292572}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T06:39:03,767 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:03,768 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e7784e49abc2b30af0cdcae90903a5dc: Running coprocessor pre-open hook at 1732343943753Writing region info on filesystem at 1732343943753Initializing all the Stores at 1732343943755 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343943755Cleaning up temporary data from old regions at 1732343943761 (+6 ms)Running coprocessor post-open hooks at 1732343943767 (+6 ms)Region opened successfully at 1732343943768 (+1 ms) 2024-11-23T06:39:03,769 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., pid=6, masterSystemTime=1732343943748 2024-11-23T06:39:03,771 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 
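
With the region open here and the table marked ENABLED a few entries further down, the test can start writing; the workload itself is outside this excerpt. A minimal client write against the 'info' family, the kind of mutation that fills the 128 MB WAL and eventually triggers a roll, might look like this sketch (illustrative only, not the test's actual code; the row, qualifier, and value are placeholders):

    // Sketch of a single put against the table created above.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteToTestLogRollingTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          Put put = new Put(Bytes.toBytes("row-0001"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          table.put(put); // with default durability, the mutation is appended to the WAL before it is acked
        }
      }
    }
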
2024-11-23T06:39:03,771 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 2024-11-23T06:39:03,771 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e7784e49abc2b30af0cdcae90903a5dc, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,34775,1732343941765 2024-11-23T06:39:03,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e7784e49abc2b30af0cdcae90903a5dc, server=df2f15951535,34775,1732343941765 because future has completed 2024-11-23T06:39:03,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T06:39:03,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e7784e49abc2b30af0cdcae90903a5dc, server=df2f15951535,34775,1732343941765 in 182 msec 2024-11-23T06:39:03,779 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T06:39:03,779 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e7784e49abc2b30af0cdcae90903a5dc, ASSIGN in 342 msec 2024-11-23T06:39:03,780 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T06:39:03,780 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732343943780"}]},"ts":"1732343943780"} 2024-11-23T06:39:03,783 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-23T06:39:03,784 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T06:39:03,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 377 msec 2024-11-23T06:39:04,027 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T06:39:04,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:04,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:04,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:05,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:05,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:06,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:06,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:07,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:07,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:08,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:08,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:08,637 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T06:39:08,639 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-23T06:39:09,255 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T06:39:09,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,259 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,259 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:09,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:09,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:10,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:10,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:10,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T06:39:10,906 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T06:39:10,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:39:10,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T06:39:10,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T06:39:10,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T06:39:10,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-23T06:39:10,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-23T06:39:11,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:11,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:12,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:12,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:13,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:13,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-23T06:39:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-23T06:39:13,448 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-23T06:39:13,448 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-23T06:39:13,454 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-23T06:39:13,454 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.
2024-11-23T06:39:13,459 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., hostname=df2f15951535,34775,1732343941765, seqNum=2]
2024-11-23T06:39:13,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e7784e49abc2b30af0cdcae90903a5dc
2024-11-23T06:39:13,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e7784e49abc2b30af0cdcae90903a5dc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-23T06:39:13,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/b7cfd38fc044410d8c4361efaee4b526 is 1080, key is row0001/info:/1732343953460/Put/seqid=0
2024-11-23T06:39:13,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741837_1013 (size=12509)
2024-11-23T06:39:13,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741837_1013 (size=12509)
2024-11-23T06:39:13,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/b7cfd38fc044410d8c4361efaee4b526
2024-11-23T06:39:13,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/b7cfd38fc044410d8c4361efaee4b526 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b7cfd38fc044410d8c4361efaee4b526
2024-11-23T06:39:13,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b7cfd38fc044410d8c4361efaee4b526, entries=7, sequenceid=11, filesize=12.2 K
2024-11-23T06:39:13,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for e7784e49abc2b30af0cdcae90903a5dc in 43ms, sequenceid=11, compaction requested=false 2024-11-23T06:39:13,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:13,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e7784e49abc2b30af0cdcae90903a5dc 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-11-23T06:39:13,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/6cb68c0252e34665b4a6dc7142ada1ff is 1080, key is row0008/info:/1732343953477/Put/seqid=0 2024-11-23T06:39:13,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741838_1014 (size=27607) 2024-11-23T06:39:13,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741838_1014 (size=27607) 2024-11-23T06:39:13,529 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/6cb68c0252e34665b4a6dc7142ada1ff 2024-11-23T06:39:13,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/6cb68c0252e34665b4a6dc7142ada1ff as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/6cb68c0252e34665b4a6dc7142ada1ff 2024-11-23T06:39:13,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/6cb68c0252e34665b4a6dc7142ada1ff, entries=21, sequenceid=35, filesize=27.0 K 2024-11-23T06:39:13,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=4.20 KB/4304 for e7784e49abc2b30af0cdcae90903a5dc in 20ms, sequenceid=35, compaction requested=false 2024-11-23T06:39:13,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:13,539 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.2 K, sizeToCheck=16.0 K 2024-11-23T06:39:13,539 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:13,539 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/6cb68c0252e34665b4a6dc7142ada1ff because midkey is the same as first or last row
2024-11-23T06:39:14,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null
2024-11-23T06:39:14,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null
2024-11-23T06:39:15,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null
2024-11-23T06:39:15,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-23T06:39:15,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:15,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e7784e49abc2b30af0cdcae90903a5dc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T06:39:15,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/02a06944780f4bf59bbf8078c6fbd9c2 is 1080, key is row0029/info:/1732343953520/Put/seqid=0 2024-11-23T06:39:15,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741839_1015 (size=12509) 2024-11-23T06:39:15,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741839_1015 (size=12509) 2024-11-23T06:39:15,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/02a06944780f4bf59bbf8078c6fbd9c2 2024-11-23T06:39:15,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/02a06944780f4bf59bbf8078c6fbd9c2 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/02a06944780f4bf59bbf8078c6fbd9c2 2024-11-23T06:39:15,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/02a06944780f4bf59bbf8078c6fbd9c2, entries=7, sequenceid=45, filesize=12.2 K 2024-11-23T06:39:15,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for e7784e49abc2b30af0cdcae90903a5dc in 25ms, sequenceid=45, compaction requested=true 2024-11-23T06:39:15,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:15,559 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=51.4 K, sizeToCheck=16.0 K 2024-11-23T06:39:15,559 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:15,559 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/6cb68c0252e34665b4a6dc7142ada1ff because midkey is the same as first or last row 2024-11-23T06:39:15,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e7784e49abc2b30af0cdcae90903a5dc:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-23T06:39:15,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:15,559 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:39:15,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:15,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e7784e49abc2b30af0cdcae90903a5dc 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T06:39:15,560 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 52625 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:39:15,561 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1541): e7784e49abc2b30af0cdcae90903a5dc/info is initiating minor compaction (all files) 2024-11-23T06:39:15,561 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e7784e49abc2b30af0cdcae90903a5dc/info in TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 2024-11-23T06:39:15,561 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b7cfd38fc044410d8c4361efaee4b526, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/6cb68c0252e34665b4a6dc7142ada1ff, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/02a06944780f4bf59bbf8078c6fbd9c2] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp, totalSize=51.4 K 2024-11-23T06:39:15,561 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7cfd38fc044410d8c4361efaee4b526, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732343953460 2024-11-23T06:39:15,562 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6cb68c0252e34665b4a6dc7142ada1ff, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=35, earliestPutTs=1732343953477 2024-11-23T06:39:15,562 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 02a06944780f4bf59bbf8078c6fbd9c2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732343953520 2024-11-23T06:39:15,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/f1abb83643704049ade4ce60762db749 is 1080, key is row0036/info:/1732343955534/Put/seqid=0 
2024-11-23T06:39:15,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741840_1016 (size=17894) 2024-11-23T06:39:15,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741840_1016 (size=17894) 2024-11-23T06:39:15,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/f1abb83643704049ade4ce60762db749 2024-11-23T06:39:15,575 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e7784e49abc2b30af0cdcae90903a5dc#info#compaction#59 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:15,576 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/b5b0d8d54f87475db4b675fa105888f4 is 1080, key is row0001/info:/1732343953460/Put/seqid=0 2024-11-23T06:39:15,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/f1abb83643704049ade4ce60762db749 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/f1abb83643704049ade4ce60762db749 2024-11-23T06:39:15,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741841_1017 (size=42824) 2024-11-23T06:39:15,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741841_1017 (size=42824) 2024-11-23T06:39:15,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/f1abb83643704049ade4ce60762db749, entries=12, sequenceid=60, filesize=17.5 K 2024-11-23T06:39:15,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=9.46 KB/9684 for e7784e49abc2b30af0cdcae90903a5dc in 22ms, sequenceid=60, compaction requested=false 2024-11-23T06:39:15,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:15,583 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.9 K, sizeToCheck=16.0 K 2024-11-23T06:39:15,583 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:15,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e7784e49abc2b30af0cdcae90903a5dc 
2024-11-23T06:39:15,583 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/6cb68c0252e34665b4a6dc7142ada1ff because midkey is the same as first or last row 2024-11-23T06:39:15,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e7784e49abc2b30af0cdcae90903a5dc 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-23T06:39:15,587 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/ce28041e94354448923f6eeba689d752 is 1080, key is row0048/info:/1732343955562/Put/seqid=0 2024-11-23T06:39:15,588 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/b5b0d8d54f87475db4b675fa105888f4 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b5b0d8d54f87475db4b675fa105888f4 2024-11-23T06:39:15,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741842_1018 (size=15740) 2024-11-23T06:39:15,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741842_1018 (size=15740) 2024-11-23T06:39:15,592 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/ce28041e94354448923f6eeba689d752 2024-11-23T06:39:15,595 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e7784e49abc2b30af0cdcae90903a5dc/info of e7784e49abc2b30af0cdcae90903a5dc into b5b0d8d54f87475db4b675fa105888f4(size=41.8 K), total size for store is 59.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T06:39:15,595 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:15,595 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., storeName=e7784e49abc2b30af0cdcae90903a5dc/info, priority=13, startTime=1732343955559; duration=0sec 2024-11-23T06:39:15,595 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-11-23T06:39:15,595 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:15,595 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b5b0d8d54f87475db4b675fa105888f4 because midkey is the same as first or last row 2024-11-23T06:39:15,595 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-11-23T06:39:15,596 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:15,596 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b5b0d8d54f87475db4b675fa105888f4 because midkey is the same as first or last row 2024-11-23T06:39:15,596 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-11-23T06:39:15,596 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:15,596 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b5b0d8d54f87475db4b675fa105888f4 because midkey is the same as first or last row 2024-11-23T06:39:15,596 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:15,596 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e7784e49abc2b30af0cdcae90903a5dc:info 2024-11-23T06:39:15,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/ce28041e94354448923f6eeba689d752 as 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce28041e94354448923f6eeba689d752 2024-11-23T06:39:15,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce28041e94354448923f6eeba689d752, entries=10, sequenceid=73, filesize=15.4 K 2024-11-23T06:39:15,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=7.36 KB/7532 for e7784e49abc2b30af0cdcae90903a5dc in 21ms, sequenceid=73, compaction requested=true 2024-11-23T06:39:15,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:15,604 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-11-23T06:39:15,604 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:15,604 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b5b0d8d54f87475db4b675fa105888f4 because midkey is the same as first or last row 2024-11-23T06:39:15,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e7784e49abc2b30af0cdcae90903a5dc:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:39:15,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:15,604 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:39:15,606 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76458 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:39:15,606 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1541): e7784e49abc2b30af0cdcae90903a5dc/info is initiating minor compaction (all files) 2024-11-23T06:39:15,606 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e7784e49abc2b30af0cdcae90903a5dc/info in TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 
2024-11-23T06:39:15,606 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b5b0d8d54f87475db4b675fa105888f4, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/f1abb83643704049ade4ce60762db749, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce28041e94354448923f6eeba689d752] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp, totalSize=74.7 K 2024-11-23T06:39:15,606 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting b5b0d8d54f87475db4b675fa105888f4, keycount=35, bloomtype=ROW, size=41.8 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732343953460 2024-11-23T06:39:15,606 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting f1abb83643704049ade4ce60762db749, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732343955534 2024-11-23T06:39:15,607 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting ce28041e94354448923f6eeba689d752, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732343955562 2024-11-23T06:39:15,617 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e7784e49abc2b30af0cdcae90903a5dc#info#compaction#61 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:15,617 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/1f42f81137114c418f2656debb48d88a is 1080, key is row0001/info:/1732343953460/Put/seqid=0 2024-11-23T06:39:15,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741843_1019 (size=66689) 2024-11-23T06:39:15,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741843_1019 (size=66689) 2024-11-23T06:39:15,626 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/1f42f81137114c418f2656debb48d88a as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a 2024-11-23T06:39:15,631 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e7784e49abc2b30af0cdcae90903a5dc/info of e7784e49abc2b30af0cdcae90903a5dc into 1f42f81137114c418f2656debb48d88a(size=65.1 K), total size for store is 65.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:15,631 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., storeName=e7784e49abc2b30af0cdcae90903a5dc/info, priority=13, startTime=1732343955604; duration=0sec 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a because midkey is the same as first or last row 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a because midkey is the same as first or last row 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a because midkey is the same as first or last row 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:15,631 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e7784e49abc2b30af0cdcae90903a5dc:info 2024-11-23T06:39:16,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-23T06:39:16,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null
2024-11-23T06:39:17,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null
2024-11-23T06:39:17,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:17,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,603 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e7784e49abc2b30af0cdcae90903a5dc 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-23T06:39:17,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/ce3f4397186148b092dceb3e2bde781e is 1080, key is row0058/info:/1732343955584/Put/seqid=0 2024-11-23T06:39:17,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741844_1020 (size=13586) 2024-11-23T06:39:17,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741844_1020 (size=13586) 2024-11-23T06:39:17,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/ce3f4397186148b092dceb3e2bde781e 2024-11-23T06:39:17,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/ce3f4397186148b092dceb3e2bde781e as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce3f4397186148b092dceb3e2bde781e 2024-11-23T06:39:17,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce3f4397186148b092dceb3e2bde781e, entries=8, sequenceid=86, filesize=13.3 K 2024-11-23T06:39:17,627 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=11.56 KB/11836 for e7784e49abc2b30af0cdcae90903a5dc in 24ms, sequenceid=86, compaction requested=false 2024-11-23T06:39:17,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:17,628 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-11-23T06:39:17,628 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:17,628 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a because midkey is the same as first or last row 2024-11-23T06:39:17,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e7784e49abc2b30af0cdcae90903a5dc 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T06:39:17,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/c3462f1cbb2045c8bed3ff4ceb360cb5 is 1080, key is row0066/info:/1732343957605/Put/seqid=0 2024-11-23T06:39:17,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741845_1021 (size=17894) 2024-11-23T06:39:17,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741845_1021 (size=17894) 2024-11-23T06:39:17,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/c3462f1cbb2045c8bed3ff4ceb360cb5 2024-11-23T06:39:17,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/c3462f1cbb2045c8bed3ff4ceb360cb5 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c3462f1cbb2045c8bed3ff4ceb360cb5 2024-11-23T06:39:17,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c3462f1cbb2045c8bed3ff4ceb360cb5, entries=12, sequenceid=101, filesize=17.5 K 2024-11-23T06:39:17,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for e7784e49abc2b30af0cdcae90903a5dc in 21ms, sequenceid=101, compaction requested=true 2024-11-23T06:39:17,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:17,650 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.9 K, sizeToCheck=16.0 K 2024-11-23T06:39:17,650 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:17,650 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a because midkey is the same as first or last row 2024-11-23T06:39:17,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e7784e49abc2b30af0cdcae90903a5dc:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:39:17,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:17,650 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:39:17,651 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 98169 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:39:17,651 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1541): e7784e49abc2b30af0cdcae90903a5dc/info is initiating minor compaction (all files) 2024-11-23T06:39:17,651 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e7784e49abc2b30af0cdcae90903a5dc/info in TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 2024-11-23T06:39:17,651 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce3f4397186148b092dceb3e2bde781e, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c3462f1cbb2045c8bed3ff4ceb360cb5] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp, totalSize=95.9 K 2024-11-23T06:39:17,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e7784e49abc2b30af0cdcae90903a5dc 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T06:39:17,652 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1f42f81137114c418f2656debb48d88a, keycount=57, bloomtype=ROW, size=65.1 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732343953460 2024-11-23T06:39:17,652 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting ce3f4397186148b092dceb3e2bde781e, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732343955584 2024-11-23T06:39:17,652 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting c3462f1cbb2045c8bed3ff4ceb360cb5, keycount=12, bloomtype=ROW, size=17.5 
K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732343957605 2024-11-23T06:39:17,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/c7a6d4753f1d472fb98773b765b0c66e is 1080, key is row0078/info:/1732343957630/Put/seqid=0 2024-11-23T06:39:17,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741846_1022 (size=17894) 2024-11-23T06:39:17,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741846_1022 (size=17894) 2024-11-23T06:39:17,664 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e7784e49abc2b30af0cdcae90903a5dc#info#compaction#65 average throughput is 26.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:17,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/c7a6d4753f1d472fb98773b765b0c66e 2024-11-23T06:39:17,665 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/08c66d39bb4d435ab98f6fd06e5c588f is 1080, key is row0001/info:/1732343953460/Put/seqid=0 2024-11-23T06:39:17,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741847_1023 (size=88408) 2024-11-23T06:39:17,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741847_1023 (size=88408) 2024-11-23T06:39:17,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/c7a6d4753f1d472fb98773b765b0c66e as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c7a6d4753f1d472fb98773b765b0c66e 2024-11-23T06:39:17,674 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/08c66d39bb4d435ab98f6fd06e5c588f as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/08c66d39bb4d435ab98f6fd06e5c588f 2024-11-23T06:39:17,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c7a6d4753f1d472fb98773b765b0c66e, entries=12, sequenceid=116, filesize=17.5 K 2024-11-23T06:39:17,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for e7784e49abc2b30af0cdcae90903a5dc in 24ms, sequenceid=116, compaction requested=false 2024-11-23T06:39:17,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:17,675 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.3 K, sizeToCheck=16.0 K 2024-11-23T06:39:17,675 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:17,675 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a because midkey is the same as first or last row 2024-11-23T06:39:17,679 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e7784e49abc2b30af0cdcae90903a5dc/info of e7784e49abc2b30af0cdcae90903a5dc into 08c66d39bb4d435ab98f6fd06e5c588f(size=86.3 K), total size for store is 103.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T06:39:17,679 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e7784e49abc2b30af0cdcae90903a5dc: 2024-11-23T06:39:17,679 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., storeName=e7784e49abc2b30af0cdcae90903a5dc/info, priority=13, startTime=1732343957650; duration=0sec 2024-11-23T06:39:17,679 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K 2024-11-23T06:39:17,679 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:17,679 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K 2024-11-23T06:39:17,679 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:17,679 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K 2024-11-23T06:39:17,679 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T06:39:17,680 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-11-23T06:39:17,680 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:17,680 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e7784e49abc2b30af0cdcae90903a5dc:info 2024-11-23T06:39:17,681 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44159 {}] assignment.AssignmentManager(1363): Split request from df2f15951535,34775,1732343941765, parent={ENCODED => e7784e49abc2b30af0cdcae90903a5dc, NAME => 'TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-23T06:39:17,685 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44159 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=df2f15951535,34775,1732343941765 2024-11-23T06:39:17,688 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44159 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e7784e49abc2b30af0cdcae90903a5dc, daughterA=23483c3a1a3b1df53f7352fa2f57ebb8, daughterB=e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:17,689 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e7784e49abc2b30af0cdcae90903a5dc, daughterA=23483c3a1a3b1df53f7352fa2f57ebb8, daughterB=e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:17,689 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e7784e49abc2b30af0cdcae90903a5dc, daughterA=23483c3a1a3b1df53f7352fa2f57ebb8, daughterB=e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:17,689 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e7784e49abc2b30af0cdcae90903a5dc, daughterA=23483c3a1a3b1df53f7352fa2f57ebb8, daughterB=e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:17,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e7784e49abc2b30af0cdcae90903a5dc, UNASSIGN}] 2024-11-23T06:39:17,698 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e7784e49abc2b30af0cdcae90903a5dc, UNASSIGN 2024-11-23T06:39:17,700 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44159 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=SPLITTING, location=df2f15951535,34775,1732343941765, table=TestLogRolling-testLogRolling, region=e7784e49abc2b30af0cdcae90903a5dc. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
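Up to this point every step in the log was triggered automatically: MemStoreFlusher flushed the memstore, the compaction policy selected three store files, and the split policy asked the master to split at splitKey=row0062. For readers reproducing this against a live cluster, the same effects can be forced explicitly through the client Admin API. This is a minimal illustrative sketch, not part of the test itself; it assumes the table name and split key shown in the log and an hbase-site.xml for the cluster on the classpath.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class ManualFlushAndSplit {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Force a memstore flush, the same operation the MemStoreFlusher runs above.
      admin.flush(table);
      // Ask the master to split the region at row0062, the splitKey seen in the log.
      admin.split(table, Bytes.toBytes("row0062"));
    }
  }
}

Both calls are asynchronous requests to the master/region server; the resulting procedures would show up in the log in the same way as the SplitTableRegionProcedure entries that follow.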
2024-11-23T06:39:17,701 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=e7784e49abc2b30af0cdcae90903a5dc, regionState=CLOSING, regionLocation=df2f15951535,34775,1732343941765 2024-11-23T06:39:17,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e7784e49abc2b30af0cdcae90903a5dc, UNASSIGN because future has completed 2024-11-23T06:39:17,704 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T06:39:17,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure e7784e49abc2b30af0cdcae90903a5dc, server=df2f15951535,34775,1732343941765}] 2024-11-23T06:39:17,865 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,865 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T06:39:17,866 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing e7784e49abc2b30af0cdcae90903a5dc, disabling compactions & flushes 2024-11-23T06:39:17,867 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 2024-11-23T06:39:17,867 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 2024-11-23T06:39:17,867 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. after waiting 0 ms 2024-11-23T06:39:17,867 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 
2024-11-23T06:39:17,867 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing e7784e49abc2b30af0cdcae90903a5dc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T06:39:17,876 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/d174f893f939440abc45f9c8cfb97219 is 1080, key is row0090/info:/1732343957653/Put/seqid=0 2024-11-23T06:39:17,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741848_1024 (size=12509) 2024-11-23T06:39:17,886 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/d174f893f939440abc45f9c8cfb97219 2024-11-23T06:39:17,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741848_1024 (size=12509) 2024-11-23T06:39:17,890 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/.tmp/info/d174f893f939440abc45f9c8cfb97219 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/d174f893f939440abc45f9c8cfb97219 2024-11-23T06:39:17,895 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/d174f893f939440abc45f9c8cfb97219, entries=7, sequenceid=127, filesize=12.2 K 2024-11-23T06:39:17,896 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for e7784e49abc2b30af0cdcae90903a5dc in 29ms, sequenceid=127, compaction requested=true 2024-11-23T06:39:17,898 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b7cfd38fc044410d8c4361efaee4b526, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/6cb68c0252e34665b4a6dc7142ada1ff, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b5b0d8d54f87475db4b675fa105888f4, 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/02a06944780f4bf59bbf8078c6fbd9c2, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/f1abb83643704049ade4ce60762db749, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce28041e94354448923f6eeba689d752, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce3f4397186148b092dceb3e2bde781e, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c3462f1cbb2045c8bed3ff4ceb360cb5] to archive 2024-11-23T06:39:17,898 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T06:39:17,900 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b7cfd38fc044410d8c4361efaee4b526 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b7cfd38fc044410d8c4361efaee4b526 2024-11-23T06:39:17,902 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/6cb68c0252e34665b4a6dc7142ada1ff to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/6cb68c0252e34665b4a6dc7142ada1ff 2024-11-23T06:39:17,903 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b5b0d8d54f87475db4b675fa105888f4 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/b5b0d8d54f87475db4b675fa105888f4 2024-11-23T06:39:17,904 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/02a06944780f4bf59bbf8078c6fbd9c2 to 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/02a06944780f4bf59bbf8078c6fbd9c2 2024-11-23T06:39:17,906 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/f1abb83643704049ade4ce60762db749 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/f1abb83643704049ade4ce60762db749 2024-11-23T06:39:17,907 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/1f42f81137114c418f2656debb48d88a 2024-11-23T06:39:17,909 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce28041e94354448923f6eeba689d752 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce28041e94354448923f6eeba689d752 2024-11-23T06:39:17,911 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce3f4397186148b092dceb3e2bde781e to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/ce3f4397186148b092dceb3e2bde781e 2024-11-23T06:39:17,912 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c3462f1cbb2045c8bed3ff4ceb360cb5 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c3462f1cbb2045c8bed3ff4ceb360cb5 2024-11-23T06:39:17,920 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 
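The HFileArchiver entries above show compacted store files being moved from the region's data directory to the parallel archive/ tree rather than deleted. A quick way to see what was archived is to list that directory directly on HDFS. The sketch below is illustrative only; the NameNode address and table path are copied from the log lines above and are only meaningful while that mini-cluster test is still running.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    // NameNode and archive path taken from the log above (hdfs://localhost:46567).
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46567"), new Configuration());
    Path tableArchive = new Path("/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/"
        + "archive/data/default/TestLogRolling-testLogRolling");
    // Print the per-region directories the HFileArchiver has populated for this table.
    for (FileStatus status : fs.listStatus(tableArchive)) {
      System.out.println(status.getPath());
    }
  }
}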
2024-11-23T06:39:17,921 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 2024-11-23T06:39:17,921 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for e7784e49abc2b30af0cdcae90903a5dc: Waiting for close lock at 1732343957866Running coprocessor pre-close hooks at 1732343957866Disabling compacts and flushes for region at 1732343957866Disabling writes for close at 1732343957867 (+1 ms)Obtaining lock to block concurrent updates at 1732343957867Preparing flush snapshotting stores in e7784e49abc2b30af0cdcae90903a5dc at 1732343957867Finished memstore snapshotting TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., syncing WAL and waiting on mvcc, flushsize=dataSize=7532, getHeapSize=8304, getOffHeapSize=0, getCellsCount=7 at 1732343957868 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. at 1732343957870 (+2 ms)Flushing e7784e49abc2b30af0cdcae90903a5dc/info: creating writer at 1732343957870Flushing e7784e49abc2b30af0cdcae90903a5dc/info: appending metadata at 1732343957875 (+5 ms)Flushing e7784e49abc2b30af0cdcae90903a5dc/info: closing flushed file at 1732343957875Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fe7650: reopening flushed file at 1732343957890 (+15 ms)Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for e7784e49abc2b30af0cdcae90903a5dc in 29ms, sequenceid=127, compaction requested=true at 1732343957896 (+6 ms)Writing region close event to WAL at 1732343957915 (+19 ms)Running coprocessor post-close hooks at 1732343957921 (+6 ms)Closed at 1732343957921 2024-11-23T06:39:17,924 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,925 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=e7784e49abc2b30af0cdcae90903a5dc, regionState=CLOSED 2024-11-23T06:39:17,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure e7784e49abc2b30af0cdcae90903a5dc, server=df2f15951535,34775,1732343941765 because future has completed 2024-11-23T06:39:17,931 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-23T06:39:17,931 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure e7784e49abc2b30af0cdcae90903a5dc, server=df2f15951535,34775,1732343941765 in 224 msec 2024-11-23T06:39:17,934 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-23T06:39:17,934 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e7784e49abc2b30af0cdcae90903a5dc, UNASSIGN in 234 msec 2024-11-23T06:39:17,941 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:17,944 INFO 
[PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=e7784e49abc2b30af0cdcae90903a5dc, threads=3 2024-11-23T06:39:17,945 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/08c66d39bb4d435ab98f6fd06e5c588f for region: e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,945 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/d174f893f939440abc45f9c8cfb97219 for region: e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,945 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c7a6d4753f1d472fb98773b765b0c66e for region: e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,954 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/d174f893f939440abc45f9c8cfb97219, top=true 2024-11-23T06:39:17,954 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c7a6d4753f1d472fb98773b765b0c66e, top=true 2024-11-23T06:39:17,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741849_1025 (size=27) 2024-11-23T06:39:17,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741849_1025 (size=27) 2024-11-23T06:39:17,961 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-d174f893f939440abc45f9c8cfb97219 for child: e0fca511df23d8fe0d1d186ec7439dbd, parent: e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,961 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/d174f893f939440abc45f9c8cfb97219 for region: e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,962 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-c7a6d4753f1d472fb98773b765b0c66e for child: 
e0fca511df23d8fe0d1d186ec7439dbd, parent: e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,962 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/c7a6d4753f1d472fb98773b765b0c66e for region: e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741850_1026 (size=27) 2024-11-23T06:39:17,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741850_1026 (size=27) 2024-11-23T06:39:17,968 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/08c66d39bb4d435ab98f6fd06e5c588f for region: e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:17,970 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region e7784e49abc2b30af0cdcae90903a5dc Daughter A: [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc] storefiles, Daughter B: [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-c7a6d4753f1d472fb98773b765b0c66e, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-d174f893f939440abc45f9c8cfb97219] storefiles. 
2024-11-23T06:39:17,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741851_1027 (size=71) 2024-11-23T06:39:17,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741851_1027 (size=71) 2024-11-23T06:39:17,979 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:17,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741852_1028 (size=71) 2024-11-23T06:39:17,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741852_1028 (size=71) 2024-11-23T06:39:17,991 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:18,000 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-23T06:39:18,003 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-23T06:39:18,006 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732343958005"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732343958005"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732343958005"}]},"ts":"1732343958005"} 2024-11-23T06:39:18,006 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732343958005"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732343958005"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732343958005"}]},"ts":"1732343958005"} 2024-11-23T06:39:18,006 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732343958005"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732343958005"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732343958005"}]},"ts":"1732343958005"} 2024-11-23T06:39:18,026 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=23483c3a1a3b1df53f7352fa2f57ebb8, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=e0fca511df23d8fe0d1d186ec7439dbd, ASSIGN}] 2024-11-23T06:39:18,027 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=23483c3a1a3b1df53f7352fa2f57ebb8, ASSIGN 2024-11-23T06:39:18,027 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e0fca511df23d8fe0d1d186ec7439dbd, ASSIGN 2024-11-23T06:39:18,028 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=23483c3a1a3b1df53f7352fa2f57ebb8, ASSIGN; state=SPLITTING_NEW, location=df2f15951535,34775,1732343941765; forceNewPlan=false, retain=false 2024-11-23T06:39:18,028 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e0fca511df23d8fe0d1d186ec7439dbd, ASSIGN; state=SPLITTING_NEW, location=df2f15951535,34775,1732343941765; forceNewPlan=false, retain=false 2024-11-23T06:39:18,179 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=e0fca511df23d8fe0d1d186ec7439dbd, regionState=OPENING, regionLocation=df2f15951535,34775,1732343941765 2024-11-23T06:39:18,179 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=23483c3a1a3b1df53f7352fa2f57ebb8, regionState=OPENING, regionLocation=df2f15951535,34775,1732343941765 2024-11-23T06:39:18,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=23483c3a1a3b1df53f7352fa2f57ebb8, ASSIGN because future has completed 2024-11-23T06:39:18,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 23483c3a1a3b1df53f7352fa2f57ebb8, server=df2f15951535,34775,1732343941765}] 2024-11-23T06:39:18,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e0fca511df23d8fe0d1d186ec7439dbd, ASSIGN because future has completed 2024-11-23T06:39:18,188 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure e0fca511df23d8fe0d1d186ec7439dbd, server=df2f15951535,34775,1732343941765}] 2024-11-23T06:39:18,346 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. 
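At this point the two daughter regions (23483c3a1a3b1df53f7352fa2f57ebb8 and e0fca511df23d8fe0d1d186ec7439dbd) are being assigned and opened on the same region server. Once both OpenRegionProcedure subprocedures finish, the split can be confirmed from a client by listing the table's regions and their key ranges. A small sketch under the same assumptions as above (cluster configuration on the classpath):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ShowDaughterRegions {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // After the split, one daughter should end at row0062 and the other start there,
      // matching the STARTKEY/ENDKEY values in the OpenRegionProcedure entries above.
      for (RegionInfo region : admin.getRegions(table)) {
        System.out.println(region.getEncodedName()
            + " [" + Bytes.toStringBinary(region.getStartKey())
            + ", " + Bytes.toStringBinary(region.getEndKey()) + ")");
      }
    }
  }
}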
2024-11-23T06:39:18,347 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 23483c3a1a3b1df53f7352fa2f57ebb8, NAME => 'TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-23T06:39:18,348 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,348 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:18,348 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,348 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,350 INFO [StoreOpener-23483c3a1a3b1df53f7352fa2f57ebb8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,351 INFO [StoreOpener-23483c3a1a3b1df53f7352fa2f57ebb8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 23483c3a1a3b1df53f7352fa2f57ebb8 columnFamilyName info 2024-11-23T06:39:18,351 DEBUG [StoreOpener-23483c3a1a3b1df53f7352fa2f57ebb8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:18,366 DEBUG [StoreOpener-23483c3a1a3b1df53f7352fa2f57ebb8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc->hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/08c66d39bb4d435ab98f6fd06e5c588f-bottom 2024-11-23T06:39:18,367 INFO [StoreOpener-23483c3a1a3b1df53f7352fa2f57ebb8-1 {}] regionserver.HStore(327): Store=23483c3a1a3b1df53f7352fa2f57ebb8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:39:18,368 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,369 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,370 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,371 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,371 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,373 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,374 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 23483c3a1a3b1df53f7352fa2f57ebb8; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721274, jitterRate=-0.08285322785377502}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T06:39:18,374 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:18,375 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 23483c3a1a3b1df53f7352fa2f57ebb8: Running coprocessor pre-open hook at 1732343958348Writing region info on filesystem at 1732343958348Initializing all the Stores at 1732343958350 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343958350Cleaning up temporary data from old regions at 1732343958371 (+21 ms)Running coprocessor post-open hooks at 1732343958374 (+3 ms)Region opened successfully at 1732343958375 (+1 ms) 2024-11-23T06:39:18,376 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8., pid=12, masterSystemTime=1732343958339 2024-11-23T06:39:18,376 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for 
store 23483c3a1a3b1df53f7352fa2f57ebb8:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:39:18,376 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:18,376 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-23T06:39:18,377 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. 2024-11-23T06:39:18,377 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1541): 23483c3a1a3b1df53f7352fa2f57ebb8/info is initiating minor compaction (all files) 2024-11-23T06:39:18,377 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 23483c3a1a3b1df53f7352fa2f57ebb8/info in TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. 2024-11-23T06:39:18,377 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc->hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/08c66d39bb4d435ab98f6fd06e5c588f-bottom] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/.tmp, totalSize=86.3 K 2024-11-23T06:39:18,378 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732343953460 2024-11-23T06:39:18,379 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. 2024-11-23T06:39:18,379 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. 2024-11-23T06:39:18,379 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 
2024-11-23T06:39:18,379 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => e0fca511df23d8fe0d1d186ec7439dbd, NAME => 'TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-23T06:39:18,379 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,379 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:18,379 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,379 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,380 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=23483c3a1a3b1df53f7352fa2f57ebb8, regionState=OPEN, openSeqNum=131, regionLocation=df2f15951535,34775,1732343941765 2024-11-23T06:39:18,381 INFO [StoreOpener-e0fca511df23d8fe0d1d186ec7439dbd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,382 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-23T06:39:18,382 INFO [StoreOpener-e0fca511df23d8fe0d1d186ec7439dbd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e0fca511df23d8fe0d1d186ec7439dbd columnFamilyName info 2024-11-23T06:39:18,382 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
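The RegionStateStore Put entries earlier (splitA/splitB qualifiers on the parent row, regioninfo/state/seqnumDuringOpen on the daughter rows) were written to hbase:meta, and the flush of region 1588230740 requested here persists them. Those rows can be inspected from a client by scanning the info family of hbase:meta; the following is an illustrative sketch under the same cluster-availability assumption as the earlier snippets.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DumpMetaRows {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Scan scan = new Scan().addFamily(Bytes.toBytes("info"));
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result row : scanner) {
          // Prints the meta row keys: the parent region row plus the two daughter
          // region rows written by the split procedure logged above.
          System.out.println(Bytes.toStringBinary(row.getRow()));
        }
      }
    }
  }
}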
2024-11-23T06:39:18,382 DEBUG [StoreOpener-e0fca511df23d8fe0d1d186ec7439dbd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:18,382 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-23T06:39:18,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 23483c3a1a3b1df53f7352fa2f57ebb8, server=df2f15951535,34775,1732343941765 because future has completed 2024-11-23T06:39:18,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-23T06:39:18,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 23483c3a1a3b1df53f7352fa2f57ebb8, server=df2f15951535,34775,1732343941765 in 198 msec 2024-11-23T06:39:18,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=23483c3a1a3b1df53f7352fa2f57ebb8, ASSIGN in 361 msec 2024-11-23T06:39:18,397 DEBUG [StoreOpener-e0fca511df23d8fe0d1d186ec7439dbd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc->hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/08c66d39bb4d435ab98f6fd06e5c588f-top 2024-11-23T06:39:18,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/info/c5612f2ba41d4200bdc91a19474fbc6b is 193, key is TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd./info:regioninfo/1732343958179/Put/seqid=0 2024-11-23T06:39:18,401 DEBUG [StoreOpener-e0fca511df23d8fe0d1d186ec7439dbd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-c7a6d4753f1d472fb98773b765b0c66e 2024-11-23T06:39:18,403 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 23483c3a1a3b1df53f7352fa2f57ebb8#info#compaction#67 average throughput is 15.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:18,403 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/.tmp/info/1ba362c987ab4704a87be50fa0c2a365 is 1080, key is row0001/info:/1732343953460/Put/seqid=0 2024-11-23T06:39:18,406 DEBUG [StoreOpener-e0fca511df23d8fe0d1d186ec7439dbd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-d174f893f939440abc45f9c8cfb97219 2024-11-23T06:39:18,407 INFO [StoreOpener-e0fca511df23d8fe0d1d186ec7439dbd-1 {}] regionserver.HStore(327): Store=e0fca511df23d8fe0d1d186ec7439dbd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:39:18,407 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,407 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,408 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,409 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,409 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741853_1029 (size=9847) 2024-11-23T06:39:18,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741853_1029 (size=9847) 2024-11-23T06:39:18,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/info/c5612f2ba41d4200bdc91a19474fbc6b 2024-11-23T06:39:18,411 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,412 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened e0fca511df23d8fe0d1d186ec7439dbd; next sequenceid=131; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694559, jitterRate=-0.11682356894016266}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T06:39:18,412 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:18,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:18,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:18,412 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for e0fca511df23d8fe0d1d186ec7439dbd: Running coprocessor pre-open hook at 1732343958380Writing region info on filesystem at 1732343958380Initializing all the Stores at 1732343958380Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343958380Cleaning up temporary data from old regions at 1732343958409 (+29 ms)Running coprocessor post-open hooks at 1732343958412 (+3 ms)Region opened successfully at 1732343958412 2024-11-23T06:39:18,413 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., pid=13, masterSystemTime=1732343958339 2024-11-23T06:39:18,413 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store e0fca511df23d8fe0d1d186ec7439dbd:info, priority=-2147483648, current under compaction store size is 2 2024-11-23T06:39:18,413 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:18,413 DEBUG [RS:0;df2f15951535:34775-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:39:18,414 INFO [RS:0;df2f15951535:34775-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 2024-11-23T06:39:18,414 DEBUG [RS:0;df2f15951535:34775-longCompactions-0 {}] regionserver.HStore(1541): e0fca511df23d8fe0d1d186ec7439dbd/info is initiating minor compaction (all files) 2024-11-23T06:39:18,414 INFO [RS:0;df2f15951535:34775-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0fca511df23d8fe0d1d186ec7439dbd/info in TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 2024-11-23T06:39:18,415 INFO [RS:0;df2f15951535:34775-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc->hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/08c66d39bb4d435ab98f6fd06e5c588f-top, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-c7a6d4753f1d472fb98773b765b0c66e, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-d174f893f939440abc45f9c8cfb97219] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp, totalSize=116.0 K 2024-11-23T06:39:18,415 DEBUG [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 2024-11-23T06:39:18,415 INFO [RS_OPEN_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 
2024-11-23T06:39:18,415 DEBUG [RS:0;df2f15951535:34775-longCompactions-0 {}] compactions.Compactor(225): Compacting 08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1732343953460 2024-11-23T06:39:18,416 DEBUG [RS:0;df2f15951535:34775-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-c7a6d4753f1d472fb98773b765b0c66e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732343957630 2024-11-23T06:39:18,416 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=e0fca511df23d8fe0d1d186ec7439dbd, regionState=OPEN, openSeqNum=131, regionLocation=df2f15951535,34775,1732343941765 2024-11-23T06:39:18,416 DEBUG [RS:0;df2f15951535:34775-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-d174f893f939440abc45f9c8cfb97219, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732343957653 2024-11-23T06:39:18,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741854_1030 (size=70862) 2024-11-23T06:39:18,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure e0fca511df23d8fe0d1d186ec7439dbd, server=df2f15951535,34775,1732343941765 because future has completed 2024-11-23T06:39:18,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741854_1030 (size=70862) 2024-11-23T06:39:18,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-23T06:39:18,433 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure e0fca511df23d8fe0d1d186ec7439dbd, server=df2f15951535,34775,1732343941765 in 241 msec 2024-11-23T06:39:18,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-23T06:39:18,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e0fca511df23d8fe0d1d186ec7439dbd, ASSIGN in 407 msec 2024-11-23T06:39:18,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=e7784e49abc2b30af0cdcae90903a5dc, daughterA=23483c3a1a3b1df53f7352fa2f57ebb8, daughterB=e0fca511df23d8fe0d1d186ec7439dbd in 750 msec 2024-11-23T06:39:18,437 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/.tmp/info/1ba362c987ab4704a87be50fa0c2a365 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/info/1ba362c987ab4704a87be50fa0c2a365 2024-11-23T06:39:18,444 INFO 
[RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 23483c3a1a3b1df53f7352fa2f57ebb8/info of 23483c3a1a3b1df53f7352fa2f57ebb8 into 1ba362c987ab4704a87be50fa0c2a365(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T06:39:18,444 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 23483c3a1a3b1df53f7352fa2f57ebb8: 2024-11-23T06:39:18,444 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8., storeName=23483c3a1a3b1df53f7352fa2f57ebb8/info, priority=15, startTime=1732343958376; duration=0sec 2024-11-23T06:39:18,444 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:18,444 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 23483c3a1a3b1df53f7352fa2f57ebb8:info 2024-11-23T06:39:18,445 INFO [RS:0;df2f15951535:34775-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0fca511df23d8fe0d1d186ec7439dbd#info#compaction#69 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:18,446 DEBUG [RS:0;df2f15951535:34775-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/602878de4f404edcae942151c1fed03b is 1080, key is row0062/info:/1732343955593/Put/seqid=0 2024-11-23T06:39:18,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741855_1031 (size=42984) 2024-11-23T06:39:18,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741855_1031 (size=42984) 2024-11-23T06:39:18,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/ns/f65aa7eebbd34ccfaefe95c28487583a is 43, key is default/ns:d/1732343943315/Put/seqid=0 2024-11-23T06:39:18,461 DEBUG [RS:0;df2f15951535:34775-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/602878de4f404edcae942151c1fed03b as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/602878de4f404edcae942151c1fed03b 2024-11-23T06:39:18,467 INFO [RS:0;df2f15951535:34775-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0fca511df23d8fe0d1d186ec7439dbd/info of e0fca511df23d8fe0d1d186ec7439dbd into 602878de4f404edcae942151c1fed03b(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T06:39:18,467 DEBUG [RS:0;df2f15951535:34775-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:18,467 INFO [RS:0;df2f15951535:34775-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., storeName=e0fca511df23d8fe0d1d186ec7439dbd/info, priority=13, startTime=1732343958413; duration=0sec 2024-11-23T06:39:18,467 DEBUG [RS:0;df2f15951535:34775-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:18,467 DEBUG [RS:0;df2f15951535:34775-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0fca511df23d8fe0d1d186ec7439dbd:info 2024-11-23T06:39:18,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741856_1032 (size=5153) 2024-11-23T06:39:18,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741856_1032 (size=5153) 2024-11-23T06:39:18,470 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/ns/f65aa7eebbd34ccfaefe95c28487583a 2024-11-23T06:39:18,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/table/2979934b350a49778d00f648fc8ad4e3 is 65, key is TestLogRolling-testLogRolling/table:state/1732343943780/Put/seqid=0 2024-11-23T06:39:18,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741857_1033 (size=5340) 2024-11-23T06:39:18,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741857_1033 (size=5340) 2024-11-23T06:39:18,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/table/2979934b350a49778d00f648fc8ad4e3 2024-11-23T06:39:18,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/info/c5612f2ba41d4200bdc91a19474fbc6b as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/info/c5612f2ba41d4200bdc91a19474fbc6b 2024-11-23T06:39:18,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/info/c5612f2ba41d4200bdc91a19474fbc6b, entries=30, sequenceid=17, filesize=9.6 K 2024-11-23T06:39:18,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/ns/f65aa7eebbd34ccfaefe95c28487583a as 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/ns/f65aa7eebbd34ccfaefe95c28487583a 2024-11-23T06:39:18,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/ns/f65aa7eebbd34ccfaefe95c28487583a, entries=2, sequenceid=17, filesize=5.0 K 2024-11-23T06:39:18,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/table/2979934b350a49778d00f648fc8ad4e3 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/table/2979934b350a49778d00f648fc8ad4e3 2024-11-23T06:39:18,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/table/2979934b350a49778d00f648fc8ad4e3, entries=2, sequenceid=17, filesize=5.2 K 2024-11-23T06:39:18,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 136ms, sequenceid=17, compaction requested=false 2024-11-23T06:39:18,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-23T06:39:19,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:19,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:19,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:37518 deadline: 1732343969669, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. 
is not online on df2f15951535,34775,1732343941765 2024-11-23T06:39:19,698 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., hostname=df2f15951535,34775,1732343941765, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., hostname=df2f15951535,34775,1732343941765, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. is not online on df2f15951535,34775,1732343941765 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T06:39:19,699 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., hostname=df2f15951535,34775,1732343941765, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc. is not online on df2f15951535,34775,1732343941765 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T06:39:19,699 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732343943407.e7784e49abc2b30af0cdcae90903a5dc., hostname=df2f15951535,34775,1732343941765, seqNum=2 from cache 2024-11-23T06:39:20,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:20,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:21,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:21,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:22,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:22,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:22,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:22,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:23,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:23,478 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T06:39:23,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,481 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:23,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:24,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:24,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:25,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:25,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:26,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:26,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:27,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:27,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:28,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:28,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:29,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:29,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:29,731 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., hostname=df2f15951535,34775,1732343941765, seqNum=131] 2024-11-23T06:39:29,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:29,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T06:39:29,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/4e2a662f63ba41a8a522f357083330b6 is 1080, key is row0097/info:/1732343969732/Put/seqid=0 2024-11-23T06:39:29,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741858_1034 (size=12516) 2024-11-23T06:39:29,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741858_1034 (size=12516) 2024-11-23T06:39:29,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/4e2a662f63ba41a8a522f357083330b6 2024-11-23T06:39:29,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/4e2a662f63ba41a8a522f357083330b6 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/4e2a662f63ba41a8a522f357083330b6 2024-11-23T06:39:29,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/4e2a662f63ba41a8a522f357083330b6, entries=7, sequenceid=141, filesize=12.2 K 2024-11-23T06:39:29,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for e0fca511df23d8fe0d1d186ec7439dbd in 26ms, sequenceid=141, compaction requested=false 2024-11-23T06:39:29,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:29,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:29,772 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T06:39:29,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/bb04f312563d439883c08f7bae9fd689 is 1080, key is row0104/info:/1732343969747/Put/seqid=0 2024-11-23T06:39:29,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741859_1035 (size=17906) 2024-11-23T06:39:29,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741859_1035 (size=17906) 2024-11-23T06:39:29,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/bb04f312563d439883c08f7bae9fd689 2024-11-23T06:39:29,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/bb04f312563d439883c08f7bae9fd689 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bb04f312563d439883c08f7bae9fd689 2024-11-23T06:39:29,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bb04f312563d439883c08f7bae9fd689, entries=12, sequenceid=156, filesize=17.5 K 2024-11-23T06:39:29,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for e0fca511df23d8fe0d1d186ec7439dbd in 35ms, sequenceid=156, compaction requested=true 2024-11-23T06:39:29,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:29,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0fca511df23d8fe0d1d186ec7439dbd:info, priority=-2147483648, current under compaction store size is 1 
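Editor's note: the flush entries above show the memstore being written to a new HFile under the region's `.tmp` directory and then "Committing ... as ..." into the `info` store directory. Below is a minimal sketch of that write-then-rename commit idiom using only the public Hadoop FileSystem API; it is an illustration, not HBase's HRegionFileSystem code, and the class name and paths are placeholders.

```java
// Minimal sketch (assumed, not HBase's HRegionFileSystem): commit a freshly
// flushed file by writing it under a temporary directory and then renaming it
// into the store directory, mirroring the ".tmp/info/... as .../info/..." lines above.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpThenRenameCommit {
  // Moves a completed temporary file into its final store location.
  static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // On HDFS a rename within the same filesystem is atomic, so readers either
    // see the old store contents or the fully written new file, never a partial one.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to move " + tmpFile + " to " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Placeholder paths; the real layout is data/<namespace>/<table>/<region>/.tmp/<cf>/<file>.
    Path tmp = new Path(args[0]);
    Path store = new Path(args[1]);
    System.out.println("committed as " + commit(fs, tmp, store));
  }
}
```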
2024-11-23T06:39:29,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:29,808 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:39:29,809 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73406 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:39:29,809 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1541): e0fca511df23d8fe0d1d186ec7439dbd/info is initiating minor compaction (all files) 2024-11-23T06:39:29,809 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0fca511df23d8fe0d1d186ec7439dbd/info in TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 2024-11-23T06:39:29,809 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/602878de4f404edcae942151c1fed03b, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/4e2a662f63ba41a8a522f357083330b6, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bb04f312563d439883c08f7bae9fd689] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp, totalSize=71.7 K 2024-11-23T06:39:29,810 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 602878de4f404edcae942151c1fed03b, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732343955593 2024-11-23T06:39:29,810 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4e2a662f63ba41a8a522f357083330b6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1732343969732 2024-11-23T06:39:29,810 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting bb04f312563d439883c08f7bae9fd689, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732343969747 2024-11-23T06:39:29,822 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0fca511df23d8fe0d1d186ec7439dbd#info#compaction#74 average throughput is 27.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:29,822 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/1a4a949c59954087a8dd760425712715 is 1080, key is row0062/info:/1732343955593/Put/seqid=0 2024-11-23T06:39:29,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741860_1036 (size=63636) 2024-11-23T06:39:29,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741860_1036 (size=63636) 2024-11-23T06:39:29,836 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/1a4a949c59954087a8dd760425712715 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1a4a949c59954087a8dd760425712715 2024-11-23T06:39:29,842 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0fca511df23d8fe0d1d186ec7439dbd/info of e0fca511df23d8fe0d1d186ec7439dbd into 1a4a949c59954087a8dd760425712715(size=62.1 K), total size for store is 62.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T06:39:29,842 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:29,842 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., storeName=e0fca511df23d8fe0d1d186ec7439dbd/info, priority=13, startTime=1732343969808; duration=0sec 2024-11-23T06:39:29,842 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:29,842 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0fca511df23d8fe0d1d186ec7439dbd:info 2024-11-23T06:39:30,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:30,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:31,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:31,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:31,549 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
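Editor's note: the repeated "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" entries come from the lease-recovery retry loop calling DistributedFileSystem#isFileClosed through reflection after the underlying DFSClient has already been shut down. The sketch below shows that call pattern in isolation; it is an illustrative probe, not the RecoverLeaseFSUtils implementation, and the class and method names here are invented for the example.

```java
// Minimal sketch (not the HBase implementation): probe whether an HDFS file is
// closed by invoking isFileClosed reflectively, the call pattern behind the
// InvocationTargetException entries above.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  // Returns true if the filesystem reports the file as closed, false if the
  // probe could not be completed (method missing or invocation failed).
  static boolean probeFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed is looked up reflectively so the caller still loads against
      // FileSystem implementations that do not expose the method.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // method not available on this FileSystem implementation
    } catch (IllegalAccessException | InvocationTargetException e) {
      // An InvocationTargetException wraps the real cause, e.g. the
      // "java.io.IOException: Filesystem closed" seen in the log when the
      // DFSClient behind the FileSystem has already been closed.
      return false;
    }
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    System.out.println(probeFileClosed(fs, new Path(args[0])));
  }
}
```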
2024-11-23T06:39:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:31,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-23T06:39:31,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/bbf0d1c6de5946a79719d6dee6dfa700 is 1080, key is row0116/info:/1732343969773/Put/seqid=0 2024-11-23T06:39:31,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741861_1037 (size=20078) 2024-11-23T06:39:31,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741861_1037 (size=20078) 2024-11-23T06:39:31,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/bbf0d1c6de5946a79719d6dee6dfa700 2024-11-23T06:39:31,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/bbf0d1c6de5946a79719d6dee6dfa700 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bbf0d1c6de5946a79719d6dee6dfa700 2024-11-23T06:39:31,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bbf0d1c6de5946a79719d6dee6dfa700, entries=14, sequenceid=174, filesize=19.6 K 2024-11-23T06:39:31,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=10.51 KB/10760 for e0fca511df23d8fe0d1d186ec7439dbd in 25ms, sequenceid=174, compaction requested=false 2024-11-23T06:39:31,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:31,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:31,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-23T06:39:31,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/9fe52524f47d4d088ab0975e4c5d17ba is 1080, key is row0130/info:/1732343971806/Put/seqid=0 2024-11-23T06:39:31,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to 
blk_1073741862_1038 (size=16828) 2024-11-23T06:39:31,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741862_1038 (size=16828) 2024-11-23T06:39:31,866 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/9fe52524f47d4d088ab0975e4c5d17ba 2024-11-23T06:39:31,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/9fe52524f47d4d088ab0975e4c5d17ba as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/9fe52524f47d4d088ab0975e4c5d17ba 2024-11-23T06:39:31,877 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/9fe52524f47d4d088ab0975e4c5d17ba, entries=11, sequenceid=188, filesize=16.4 K 2024-11-23T06:39:31,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for e0fca511df23d8fe0d1d186ec7439dbd in 47ms, sequenceid=188, compaction requested=true 2024-11-23T06:39:31,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:31,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0fca511df23d8fe0d1d186ec7439dbd:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:39:31,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:31,879 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:39:31,879 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 100542 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:39:31,880 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1541): e0fca511df23d8fe0d1d186ec7439dbd/info is initiating minor compaction (all files) 2024-11-23T06:39:31,880 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0fca511df23d8fe0d1d186ec7439dbd/info in TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 
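Editor's note: the "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above reflect a ratio-based eligibility test over candidate store files. The sketch below shows one common formulation of that test (no single file may be larger than ratio times the combined size of the others). It is a simplification: the real ExploringCompactionPolicy layers further rules on top (file-count limits, special handling when all eligible files are selected), so this sketch is not guaranteed to reproduce the exact decision logged here, and the 1.2 ratio and method name are assumptions.

```java
// Simplified sketch of a ratio-based compaction eligibility check, in the
// spirit of the "in ratio" messages above. The 1.2 ratio and the example sizes
// are illustrative assumptions, not values taken from this log.
import java.util.List;

public final class RatioCheck {
  // A candidate set is "in ratio" when no single file dwarfs the rest:
  // every file's size must be <= ratio * (sum of the other files' sizes).
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three similarly sized files pass the check; one oversized file would fail it.
    System.out.println(filesInRatio(List.of(30_000L, 25_000L, 20_000L), 1.2)); // true
    System.out.println(filesInRatio(List.of(90_000L, 25_000L, 20_000L), 1.2)); // false
  }
}
```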
2024-11-23T06:39:31,880 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1a4a949c59954087a8dd760425712715, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bbf0d1c6de5946a79719d6dee6dfa700, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/9fe52524f47d4d088ab0975e4c5d17ba] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp, totalSize=98.2 K 2024-11-23T06:39:31,880 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1a4a949c59954087a8dd760425712715, keycount=54, bloomtype=ROW, size=62.1 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732343955593 2024-11-23T06:39:31,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:31,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-23T06:39:31,880 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting bbf0d1c6de5946a79719d6dee6dfa700, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732343969773 2024-11-23T06:39:31,881 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9fe52524f47d4d088ab0975e4c5d17ba, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1732343971806 2024-11-23T06:39:31,885 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/739ad169584140ad8af95289cc7ffa34 is 1080, key is row0141/info:/1732343971832/Put/seqid=0 2024-11-23T06:39:31,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741863_1039 (size=21156) 2024-11-23T06:39:31,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741863_1039 (size=21156) 2024-11-23T06:39:31,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/739ad169584140ad8af95289cc7ffa34 2024-11-23T06:39:31,902 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0fca511df23d8fe0d1d186ec7439dbd#info#compaction#78 average throughput is 40.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:31,903 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/e810c93a7950429685e04229284a5b57 is 1080, key is row0062/info:/1732343955593/Put/seqid=0 2024-11-23T06:39:31,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/739ad169584140ad8af95289cc7ffa34 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/739ad169584140ad8af95289cc7ffa34 2024-11-23T06:39:31,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741864_1040 (size=90765) 2024-11-23T06:39:31,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741864_1040 (size=90765) 2024-11-23T06:39:31,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/739ad169584140ad8af95289cc7ffa34, entries=15, sequenceid=206, filesize=20.7 K 2024-11-23T06:39:31,914 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=5.25 KB/5380 for e0fca511df23d8fe0d1d186ec7439dbd in 34ms, sequenceid=206, compaction requested=false 2024-11-23T06:39:31,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:32,323 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/e810c93a7950429685e04229284a5b57 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/e810c93a7950429685e04229284a5b57 2024-11-23T06:39:32,329 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0fca511df23d8fe0d1d186ec7439dbd/info of e0fca511df23d8fe0d1d186ec7439dbd into e810c93a7950429685e04229284a5b57(size=88.6 K), total size for store is 109.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
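[Annotation] A few entries below, from 06:39:32,424 onward, the Close-WAL-Writer-0 thread repeatedly warns that RecoverLeaseFSUtils cannot probe the old WAL files because the underlying DFSClient has already been closed ("Filesystem closed"), so it keeps retrying roughly once per second. The general recover-then-poll pattern being retried looks roughly like the sketch below; this is an illustrative use of the public DistributedFileSystem API (recoverLease / isFileClosed), not the HBase utility itself, and the path, port, and timings are placeholders.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseSketch {
  /** Ask the NameNode to recover the lease on a WAL file and poll until it is closed. */
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(wal)) {   // true => lease released and file closed
        return true;
      }
      if (dfs.isFileClosed(wal)) {   // the probe that throws "Filesystem closed" in the WARNs below
        return true;
      }
      Thread.sleep(1000L);           // retry about once per second, matching the WARN timestamps
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder path; the real WALs in this run live under .../test-data/.../WALs/
    Path wal = new Path("hdfs://localhost:8020/example/wal-file");
    try (FileSystem fs = FileSystem.get(wal.toUri(), conf)) {
      if (fs instanceof DistributedFileSystem) {
        boolean closed = recoverLease((DistributedFileSystem) fs, wal, 60_000L);
        System.out.println("lease recovered: " + closed);
      }
    }
  }
}
```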
2024-11-23T06:39:32,329 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:32,329 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., storeName=e0fca511df23d8fe0d1d186ec7439dbd/info, priority=13, startTime=1732343971878; duration=0sec 2024-11-23T06:39:32,329 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:32,329 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0fca511df23d8fe0d1d186ec7439dbd:info 2024-11-23T06:39:32,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:32,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:33,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:33,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:33,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:33,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T06:39:33,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/1783204f1d5a4bf48d65d56b3292f22e is 1080, key is row0156/info:/1732343971882/Put/seqid=0 2024-11-23T06:39:33,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741865_1041 (size=12516) 2024-11-23T06:39:33,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741865_1041 (size=12516) 2024-11-23T06:39:33,913 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/1783204f1d5a4bf48d65d56b3292f22e 2024-11-23T06:39:33,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/1783204f1d5a4bf48d65d56b3292f22e as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1783204f1d5a4bf48d65d56b3292f22e 2024-11-23T06:39:33,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1783204f1d5a4bf48d65d56b3292f22e, entries=7, sequenceid=217, filesize=12.2 K 2024-11-23T06:39:33,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for e0fca511df23d8fe0d1d186ec7439dbd in 26ms, sequenceid=217, compaction requested=true 2024-11-23T06:39:33,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:33,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0fca511df23d8fe0d1d186ec7439dbd:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:39:33,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:33,926 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:39:33,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:33,926 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-23T06:39:33,927 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 124437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:39:33,927 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1541): e0fca511df23d8fe0d1d186ec7439dbd/info is initiating minor compaction (all files) 2024-11-23T06:39:33,927 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0fca511df23d8fe0d1d186ec7439dbd/info in TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 2024-11-23T06:39:33,927 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/e810c93a7950429685e04229284a5b57, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/739ad169584140ad8af95289cc7ffa34, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1783204f1d5a4bf48d65d56b3292f22e] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp, totalSize=121.5 K 2024-11-23T06:39:33,927 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting e810c93a7950429685e04229284a5b57, keycount=79, bloomtype=ROW, size=88.6 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1732343955593 2024-11-23T06:39:33,928 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 739ad169584140ad8af95289cc7ffa34, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732343971832 2024-11-23T06:39:33,928 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1783204f1d5a4bf48d65d56b3292f22e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732343971882 2024-11-23T06:39:33,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/c04f9a0b54334d2ea2713c46c9f0b581 is 1080, key is row0163/info:/1732343973900/Put/seqid=0 2024-11-23T06:39:33,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741866_1042 (size=19000) 2024-11-23T06:39:33,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741866_1042 (size=19000) 2024-11-23T06:39:33,940 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0fca511df23d8fe0d1d186ec7439dbd#info#compaction#81 average throughput is 51.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:33,940 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/e62c42108d9b4a91a1e39ef4e0ed4018 is 1080, key is row0062/info:/1732343955593/Put/seqid=0 2024-11-23T06:39:33,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741867_1043 (size=114587) 2024-11-23T06:39:33,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741867_1043 (size=114587) 2024-11-23T06:39:33,954 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/e62c42108d9b4a91a1e39ef4e0ed4018 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/e62c42108d9b4a91a1e39ef4e0ed4018 2024-11-23T06:39:33,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e0fca511df23d8fe0d1d186ec7439dbd, server=df2f15951535,34775,1732343941765 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-23T06:39:33,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:37518 deadline: 1732343983959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e0fca511df23d8fe0d1d186ec7439dbd, server=df2f15951535,34775,1732343941765 2024-11-23T06:39:33,961 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., hostname=df2f15951535,34775,1732343941765, seqNum=131 , the old value is region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., hostname=df2f15951535,34775,1732343941765, seqNum=131, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e0fca511df23d8fe0d1d186ec7439dbd, server=df2f15951535,34775,1732343941765 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T06:39:33,961 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., hostname=df2f15951535,34775,1732343941765, seqNum=131 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=e0fca511df23d8fe0d1d186ec7439dbd, server=df2f15951535,34775,1732343941765 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T06:39:33,961 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., hostname=df2f15951535,34775,1732343941765, seqNum=131 because the exception is null or not the one we care about 2024-11-23T06:39:33,962 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0fca511df23d8fe0d1d186ec7439dbd/info of e0fca511df23d8fe0d1d186ec7439dbd into e62c42108d9b4a91a1e39ef4e0ed4018(size=111.9 K), total size for store is 111.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T06:39:33,962 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:33,962 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., storeName=e0fca511df23d8fe0d1d186ec7439dbd/info, priority=13, startTime=1732343973926; duration=0sec 2024-11-23T06:39:33,962 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:33,962 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0fca511df23d8fe0d1d186ec7439dbd:info 2024-11-23T06:39:34,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/c04f9a0b54334d2ea2713c46c9f0b581 2024-11-23T06:39:34,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/c04f9a0b54334d2ea2713c46c9f0b581 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/c04f9a0b54334d2ea2713c46c9f0b581 2024-11-23T06:39:34,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/c04f9a0b54334d2ea2713c46c9f0b581, entries=13, sequenceid=233, filesize=18.6 K 2024-11-23T06:39:34,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for e0fca511df23d8fe0d1d186ec7439dbd in 428ms, sequenceid=233, compaction requested=false 2024-11-23T06:39:34,354 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:34,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:34,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:35,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:35,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:36,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:36,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:37,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:37,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:38,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:38,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:39,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:39,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:40,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:40,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:41,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:41,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:42,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:42,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:43,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:43,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:43,700 INFO [master/df2f15951535:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T06:39:43,700 INFO [master/df2f15951535:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-23T06:39:44,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:44,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-23T06:39:44,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/6a7dada823934fca83cdce00fda0c855 is 1080, key is row0176/info:/1732343973927/Put/seqid=0 2024-11-23T06:39:44,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741868_1044 (size=23316) 2024-11-23T06:39:44,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741868_1044 (size=23316) 2024-11-23T06:39:44,041 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/6a7dada823934fca83cdce00fda0c855 2024-11-23T06:39:44,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/6a7dada823934fca83cdce00fda0c855 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6a7dada823934fca83cdce00fda0c855 2024-11-23T06:39:44,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6a7dada823934fca83cdce00fda0c855, entries=17, sequenceid=254, filesize=22.8 K 2024-11-23T06:39:44,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=0 B/0 for e0fca511df23d8fe0d1d186ec7439dbd in 26ms, sequenceid=254, compaction requested=true 2024-11-23T06:39:44,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:44,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0fca511df23d8fe0d1d186ec7439dbd:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:39:44,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:44,053 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:39:44,054 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 156903 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:39:44,054 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1541): e0fca511df23d8fe0d1d186ec7439dbd/info is initiating minor compaction (all files) 2024-11-23T06:39:44,054 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0fca511df23d8fe0d1d186ec7439dbd/info in TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 
2024-11-23T06:39:44,055 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/e62c42108d9b4a91a1e39ef4e0ed4018, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/c04f9a0b54334d2ea2713c46c9f0b581, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6a7dada823934fca83cdce00fda0c855] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp, totalSize=153.2 K 2024-11-23T06:39:44,055 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting e62c42108d9b4a91a1e39ef4e0ed4018, keycount=101, bloomtype=ROW, size=111.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732343955593 2024-11-23T06:39:44,055 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting c04f9a0b54334d2ea2713c46c9f0b581, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732343973900 2024-11-23T06:39:44,055 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6a7dada823934fca83cdce00fda0c855, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732343973927 2024-11-23T06:39:44,065 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0fca511df23d8fe0d1d186ec7439dbd#info#compaction#83 average throughput is 67.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:44,066 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/a4a19b19e6334d2bb8d3124b5cbd3805 is 1080, key is row0062/info:/1732343955593/Put/seqid=0 2024-11-23T06:39:44,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741869_1045 (size=147234) 2024-11-23T06:39:44,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741869_1045 (size=147234) 2024-11-23T06:39:44,075 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/a4a19b19e6334d2bb8d3124b5cbd3805 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/a4a19b19e6334d2bb8d3124b5cbd3805 2024-11-23T06:39:44,082 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0fca511df23d8fe0d1d186ec7439dbd/info of e0fca511df23d8fe0d1d186ec7439dbd into a4a19b19e6334d2bb8d3124b5cbd3805(size=143.8 K), total size for store is 143.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T06:39:44,082 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:44,082 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., storeName=e0fca511df23d8fe0d1d186ec7439dbd/info, priority=13, startTime=1732343984053; duration=0sec 2024-11-23T06:39:44,082 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:44,082 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0fca511df23d8fe0d1d186ec7439dbd:info 2024-11-23T06:39:44,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:44,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:45,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:45,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:46,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:46,047 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T06:39:46,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/cbbca5d6ec304a97bdce538327553853 is 1080, key is row0193/info:/1732343986030/Put/seqid=0 2024-11-23T06:39:46,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741870_1046 (size=12523) 2024-11-23T06:39:46,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741870_1046 (size=12523) 2024-11-23T06:39:46,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/cbbca5d6ec304a97bdce538327553853 2024-11-23T06:39:46,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/cbbca5d6ec304a97bdce538327553853 as 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/cbbca5d6ec304a97bdce538327553853 2024-11-23T06:39:46,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/cbbca5d6ec304a97bdce538327553853, entries=7, sequenceid=265, filesize=12.2 K 2024-11-23T06:39:46,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for e0fca511df23d8fe0d1d186ec7439dbd in 23ms, sequenceid=265, compaction requested=false 2024-11-23T06:39:46,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:46,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:46,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T06:39:46,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/1439b4842b4041398de1d011155ce20e is 1080, key is row0200/info:/1732343986048/Put/seqid=0 2024-11-23T06:39:46,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741871_1047 (size=17918) 2024-11-23T06:39:46,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741871_1047 (size=17918) 2024-11-23T06:39:46,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/1439b4842b4041398de1d011155ce20e 2024-11-23T06:39:46,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/1439b4842b4041398de1d011155ce20e as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1439b4842b4041398de1d011155ce20e 2024-11-23T06:39:46,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1439b4842b4041398de1d011155ce20e, entries=12, sequenceid=280, filesize=17.5 K 2024-11-23T06:39:46,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for e0fca511df23d8fe0d1d186ec7439dbd in 29ms, sequenceid=280, compaction requested=true 2024-11-23T06:39:46,101 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:46,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0fca511df23d8fe0d1d186ec7439dbd:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:39:46,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:46,101 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:39:46,102 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 177675 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:39:46,102 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1541): e0fca511df23d8fe0d1d186ec7439dbd/info is initiating minor compaction (all files) 2024-11-23T06:39:46,102 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0fca511df23d8fe0d1d186ec7439dbd/info in TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 2024-11-23T06:39:46,102 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/a4a19b19e6334d2bb8d3124b5cbd3805, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/cbbca5d6ec304a97bdce538327553853, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1439b4842b4041398de1d011155ce20e] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp, totalSize=173.5 K 2024-11-23T06:39:46,102 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting a4a19b19e6334d2bb8d3124b5cbd3805, keycount=131, bloomtype=ROW, size=143.8 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732343955593 2024-11-23T06:39:46,103 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting cbbca5d6ec304a97bdce538327553853, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1732343986030 2024-11-23T06:39:46,103 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1439b4842b4041398de1d011155ce20e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732343986048 2024-11-23T06:39:46,114 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0fca511df23d8fe0d1d186ec7439dbd#info#compaction#86 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:46,114 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/7de5d767115c4035a86c4fd30f1766b3 is 1080, key is row0062/info:/1732343955593/Put/seqid=0 2024-11-23T06:39:46,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741872_1048 (size=167841) 2024-11-23T06:39:46,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741872_1048 (size=167841) 2024-11-23T06:39:46,121 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/7de5d767115c4035a86c4fd30f1766b3 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/7de5d767115c4035a86c4fd30f1766b3 2024-11-23T06:39:46,127 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0fca511df23d8fe0d1d186ec7439dbd/info of e0fca511df23d8fe0d1d186ec7439dbd into 7de5d767115c4035a86c4fd30f1766b3(size=163.9 K), total size for store is 163.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T06:39:46,127 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:46,127 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., storeName=e0fca511df23d8fe0d1d186ec7439dbd/info, priority=13, startTime=1732343986101; duration=0sec 2024-11-23T06:39:46,127 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:46,127 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0fca511df23d8fe0d1d186ec7439dbd:info 2024-11-23T06:39:46,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:46,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:47,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:47,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:48,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:48,103 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-23T06:39:48,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/f1a67b03c3a742b392080e18265aec39 is 1080, key is row0212/info:/1732343986072/Put/seqid=0 2024-11-23T06:39:48,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741873_1049 (size=20092) 2024-11-23T06:39:48,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741873_1049 (size=20092) 2024-11-23T06:39:48,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/f1a67b03c3a742b392080e18265aec39 2024-11-23T06:39:48,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/f1a67b03c3a742b392080e18265aec39 as 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/f1a67b03c3a742b392080e18265aec39 2024-11-23T06:39:48,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/f1a67b03c3a742b392080e18265aec39, entries=14, sequenceid=298, filesize=19.6 K 2024-11-23T06:39:48,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for e0fca511df23d8fe0d1d186ec7439dbd in 28ms, sequenceid=298, compaction requested=false 2024-11-23T06:39:48,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:48,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:48,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-23T06:39:48,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/6122ce00ef1549bb8e92805fd49820bf is 1080, key is row0226/info:/1732343988105/Put/seqid=0 2024-11-23T06:39:48,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741874_1050 (size=19013) 2024-11-23T06:39:48,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741874_1050 (size=19013) 2024-11-23T06:39:48,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/6122ce00ef1549bb8e92805fd49820bf 2024-11-23T06:39:48,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/6122ce00ef1549bb8e92805fd49820bf as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6122ce00ef1549bb8e92805fd49820bf 2024-11-23T06:39:48,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6122ce00ef1549bb8e92805fd49820bf, entries=13, sequenceid=314, filesize=18.6 K 2024-11-23T06:39:48,153 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for e0fca511df23d8fe0d1d186ec7439dbd in 21ms, sequenceid=314, compaction requested=true 2024-11-23T06:39:48,153 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:48,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e0fca511df23d8fe0d1d186ec7439dbd:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T06:39:48,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:48,153 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T06:39:48,154 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 206946 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T06:39:48,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34775 {}] regionserver.HRegion(8855): Flush requested on e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:48,154 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1541): e0fca511df23d8fe0d1d186ec7439dbd/info is initiating minor compaction (all files) 2024-11-23T06:39:48,154 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e0fca511df23d8fe0d1d186ec7439dbd/info in TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 2024-11-23T06:39:48,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T06:39:48,154 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/7de5d767115c4035a86c4fd30f1766b3, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/f1a67b03c3a742b392080e18265aec39, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6122ce00ef1549bb8e92805fd49820bf] into tmpdir=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp, totalSize=202.1 K 2024-11-23T06:39:48,154 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7de5d767115c4035a86c4fd30f1766b3, keycount=150, bloomtype=ROW, size=163.9 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732343955593 2024-11-23T06:39:48,155 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting f1a67b03c3a742b392080e18265aec39, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732343986072 2024-11-23T06:39:48,155 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6122ce00ef1549bb8e92805fd49820bf, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732343988105 2024-11-23T06:39:48,157 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/c7c705a3eb6a4ccb93cf2fc209a32f8b is 1080, key is row0239/info:/1732343988132/Put/seqid=0 2024-11-23T06:39:48,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741875_1051 (size=17918) 2024-11-23T06:39:48,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741875_1051 (size=17918) 2024-11-23T06:39:48,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/c7c705a3eb6a4ccb93cf2fc209a32f8b 2024-11-23T06:39:48,168 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e0fca511df23d8fe0d1d186ec7439dbd#info#compaction#90 average throughput is 90.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T06:39:48,169 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/c401ba6af439482cb27fd39ac63c73a4 is 1080, key is row0062/info:/1732343955593/Put/seqid=0 2024-11-23T06:39:48,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/c7c705a3eb6a4ccb93cf2fc209a32f8b as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/c7c705a3eb6a4ccb93cf2fc209a32f8b 2024-11-23T06:39:48,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741876_1052 (size=197084) 2024-11-23T06:39:48,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741876_1052 (size=197084) 2024-11-23T06:39:48,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/c7c705a3eb6a4ccb93cf2fc209a32f8b, entries=12, sequenceid=329, filesize=17.5 K 2024-11-23T06:39:48,179 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/c401ba6af439482cb27fd39ac63c73a4 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/c401ba6af439482cb27fd39ac63c73a4 2024-11-23T06:39:48,179 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=6.30 KB/6456 for e0fca511df23d8fe0d1d186ec7439dbd in 25ms, sequenceid=329, compaction requested=false 2024-11-23T06:39:48,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:48,185 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e0fca511df23d8fe0d1d186ec7439dbd/info of e0fca511df23d8fe0d1d186ec7439dbd into c401ba6af439482cb27fd39ac63c73a4(size=192.5 K), total size for store is 210.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T06:39:48,185 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:48,185 INFO [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., storeName=e0fca511df23d8fe0d1d186ec7439dbd/info, priority=13, startTime=1732343988153; duration=0sec 2024-11-23T06:39:48,185 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T06:39:48,185 DEBUG [RS:0;df2f15951535:34775-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e0fca511df23d8fe0d1d186ec7439dbd:info 2024-11-23T06:39:48,236 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-23T06:39:48,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:48,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:49,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:49,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:50,167 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-23T06:39:50,168 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C34775%2C1732343941765.1732343990167 2024-11-23T06:39:50,178 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,179 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,179 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,179 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,179 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,179 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765/df2f15951535%2C34775%2C1732343941765.1732343942772 with entries=315, filesize=309.33 KB; new WAL /user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765/df2f15951535%2C34775%2C1732343941765.1732343990167 2024-11-23T06:39:50,181 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36649:36649),(127.0.0.1/127.0.0.1:37563:37563)] 2024-11-23T06:39:50,181 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765/df2f15951535%2C34775%2C1732343941765.1732343942772 is not closed yet, will try archiving it next time 2024-11-23T06:39:50,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741833_1009 (size=316763) 2024-11-23T06:39:50,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741833_1009 (size=316763) 2024-11-23T06:39:50,196 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-23T06:39:50,201 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/info/00940d18ee064a1abf028ae3b617c178 is 193, key is TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd./info:regioninfo/1732343958416/Put/seqid=0 2024-11-23T06:39:50,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741878_1054 (size=6223) 2024-11-23T06:39:50,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741878_1054 (size=6223) 2024-11-23T06:39:50,207 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), 
to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/info/00940d18ee064a1abf028ae3b617c178 2024-11-23T06:39:50,213 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/.tmp/info/00940d18ee064a1abf028ae3b617c178 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/info/00940d18ee064a1abf028ae3b617c178 2024-11-23T06:39:50,218 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/info/00940d18ee064a1abf028ae3b617c178, entries=5, sequenceid=21, filesize=6.1 K 2024-11-23T06:39:50,219 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 23ms, sequenceid=21, compaction requested=false 2024-11-23T06:39:50,219 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-23T06:39:50,219 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e0fca511df23d8fe0d1d186ec7439dbd 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-23T06:39:50,222 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/16c3584abe60422ba92a077e16eb0dfc is 1080, key is row0251/info:/1732343988155/Put/seqid=0 2024-11-23T06:39:50,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741879_1055 (size=11436) 2024-11-23T06:39:50,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741879_1055 (size=11436) 2024-11-23T06:39:50,226 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/16c3584abe60422ba92a077e16eb0dfc 2024-11-23T06:39:50,231 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/.tmp/info/16c3584abe60422ba92a077e16eb0dfc as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/16c3584abe60422ba92a077e16eb0dfc 2024-11-23T06:39:50,235 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/16c3584abe60422ba92a077e16eb0dfc, entries=6, sequenceid=339, filesize=11.2 K 2024-11-23T06:39:50,237 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for e0fca511df23d8fe0d1d186ec7439dbd in 17ms, sequenceid=339, compaction 
requested=true 2024-11-23T06:39:50,237 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e0fca511df23d8fe0d1d186ec7439dbd: 2024-11-23T06:39:50,237 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 23483c3a1a3b1df53f7352fa2f57ebb8: 2024-11-23T06:39:50,237 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C34775%2C1732343941765.1732343990237 2024-11-23T06:39:50,241 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,241 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,241 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,241 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,241 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,241 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765/df2f15951535%2C34775%2C1732343941765.1732343990167 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765/df2f15951535%2C34775%2C1732343941765.1732343990237 2024-11-23T06:39:50,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741877_1053 (size=731) 2024-11-23T06:39:50,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741877_1053 (size=731) 2024-11-23T06:39:50,244 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765/df2f15951535%2C34775%2C1732343941765.1732343942772 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/oldWALs/df2f15951535%2C34775%2C1732343941765.1732343942772 2024-11-23T06:39:50,244 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36649:36649),(127.0.0.1/127.0.0.1:37563:37563)] 2024-11-23T06:39:50,245 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T06:39:50,245 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/WALs/df2f15951535,34775,1732343941765/df2f15951535%2C34775%2C1732343941765.1732343990167 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/oldWALs/df2f15951535%2C34775%2C1732343941765.1732343990167 2024-11-23T06:39:50,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T06:39:50,245 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T06:39:50,245 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:39:50,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:50,246 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:50,246 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T06:39:50,246 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T06:39:50,246 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=507231215, stopped=false 2024-11-23T06:39:50,246 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=df2f15951535,44159,1732343941568 2024-11-23T06:39:50,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:39:50,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:50,314 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:39:50,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:39:50,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:50,314 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T06:39:50,315 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:39:50,315 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:50,315 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'df2f15951535,34775,1732343941765' ***** 2024-11-23T06:39:50,315 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T06:39:50,315 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:39:50,315 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:39:50,316 INFO [RS:0;df2f15951535:34775 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T06:39:50,316 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T06:39:50,316 INFO [RS:0;df2f15951535:34775 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T06:39:50,316 INFO [RS:0;df2f15951535:34775 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-23T06:39:50,316 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(3091): Received CLOSE for e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:50,316 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(3091): Received CLOSE for 23483c3a1a3b1df53f7352fa2f57ebb8 2024-11-23T06:39:50,316 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(959): stopping server df2f15951535,34775,1732343941765 2024-11-23T06:39:50,316 INFO [RS:0;df2f15951535:34775 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:39:50,316 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e0fca511df23d8fe0d1d186ec7439dbd, disabling compactions & flushes 2024-11-23T06:39:50,316 INFO [RS:0;df2f15951535:34775 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;df2f15951535:34775. 2024-11-23T06:39:50,316 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 2024-11-23T06:39:50,316 DEBUG [RS:0;df2f15951535:34775 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:39:50,316 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 2024-11-23T06:39:50,317 DEBUG [RS:0;df2f15951535:34775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:50,317 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. after waiting 0 ms 2024-11-23T06:39:50,317 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 
2024-11-23T06:39:50,317 INFO [RS:0;df2f15951535:34775 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T06:39:50,317 INFO [RS:0;df2f15951535:34775 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T06:39:50,317 INFO [RS:0;df2f15951535:34775 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T06:39:50,317 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T06:39:50,317 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-23T06:39:50,317 DEBUG [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, e0fca511df23d8fe0d1d186ec7439dbd=TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd., 23483c3a1a3b1df53f7352fa2f57ebb8=TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8.} 2024-11-23T06:39:50,317 DEBUG [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 23483c3a1a3b1df53f7352fa2f57ebb8, e0fca511df23d8fe0d1d186ec7439dbd 2024-11-23T06:39:50,317 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:39:50,317 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:39:50,317 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:39:50,317 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:39:50,317 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:39:50,317 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc->hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/08c66d39bb4d435ab98f6fd06e5c588f-top, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-c7a6d4753f1d472fb98773b765b0c66e, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/602878de4f404edcae942151c1fed03b, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-d174f893f939440abc45f9c8cfb97219, 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/4e2a662f63ba41a8a522f357083330b6, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1a4a949c59954087a8dd760425712715, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bb04f312563d439883c08f7bae9fd689, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bbf0d1c6de5946a79719d6dee6dfa700, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/e810c93a7950429685e04229284a5b57, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/9fe52524f47d4d088ab0975e4c5d17ba, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/739ad169584140ad8af95289cc7ffa34, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/e62c42108d9b4a91a1e39ef4e0ed4018, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1783204f1d5a4bf48d65d56b3292f22e, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/c04f9a0b54334d2ea2713c46c9f0b581, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/a4a19b19e6334d2bb8d3124b5cbd3805, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6a7dada823934fca83cdce00fda0c855, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/cbbca5d6ec304a97bdce538327553853, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/7de5d767115c4035a86c4fd30f1766b3, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1439b4842b4041398de1d011155ce20e, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/f1a67b03c3a742b392080e18265aec39, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6122ce00ef1549bb8e92805fd49820bf] to archive 2024-11-23T06:39:50,319 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-11-23T06:39:50,322 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:50,323 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-23T06:39:50,324 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-c7a6d4753f1d472fb98773b765b0c66e to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-c7a6d4753f1d472fb98773b765b0c66e 2024-11-23T06:39:50,324 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:39:50,324 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:39:50,324 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343990317Running coprocessor pre-close hooks at 1732343990317Disabling compacts and flushes for region at 1732343990317Disabling writes for close at 1732343990317Writing region close event to WAL at 1732343990319 (+2 ms)Running coprocessor post-close hooks at 1732343990324 (+5 ms)Closed at 1732343990324 2024-11-23T06:39:50,324 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T06:39:50,325 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/602878de4f404edcae942151c1fed03b to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/602878de4f404edcae942151c1fed03b 2024-11-23T06:39:50,326 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from 
FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-d174f893f939440abc45f9c8cfb97219 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/TestLogRolling-testLogRolling=e7784e49abc2b30af0cdcae90903a5dc-d174f893f939440abc45f9c8cfb97219 2024-11-23T06:39:50,327 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/4e2a662f63ba41a8a522f357083330b6 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/4e2a662f63ba41a8a522f357083330b6 2024-11-23T06:39:50,328 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1a4a949c59954087a8dd760425712715 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1a4a949c59954087a8dd760425712715 2024-11-23T06:39:50,329 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bb04f312563d439883c08f7bae9fd689 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bb04f312563d439883c08f7bae9fd689 2024-11-23T06:39:50,330 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bbf0d1c6de5946a79719d6dee6dfa700 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/bbf0d1c6de5946a79719d6dee6dfa700 2024-11-23T06:39:50,331 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/e810c93a7950429685e04229284a5b57 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/e810c93a7950429685e04229284a5b57 
2024-11-23T06:39:50,331 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/9fe52524f47d4d088ab0975e4c5d17ba to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/9fe52524f47d4d088ab0975e4c5d17ba 2024-11-23T06:39:50,332 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/739ad169584140ad8af95289cc7ffa34 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/739ad169584140ad8af95289cc7ffa34 2024-11-23T06:39:50,333 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/e62c42108d9b4a91a1e39ef4e0ed4018 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/e62c42108d9b4a91a1e39ef4e0ed4018 2024-11-23T06:39:50,334 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1783204f1d5a4bf48d65d56b3292f22e to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1783204f1d5a4bf48d65d56b3292f22e 2024-11-23T06:39:50,335 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/c04f9a0b54334d2ea2713c46c9f0b581 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/c04f9a0b54334d2ea2713c46c9f0b581 2024-11-23T06:39:50,336 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/a4a19b19e6334d2bb8d3124b5cbd3805 to 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/a4a19b19e6334d2bb8d3124b5cbd3805 2024-11-23T06:39:50,337 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6a7dada823934fca83cdce00fda0c855 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6a7dada823934fca83cdce00fda0c855 2024-11-23T06:39:50,338 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/cbbca5d6ec304a97bdce538327553853 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/cbbca5d6ec304a97bdce538327553853 2024-11-23T06:39:50,340 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/7de5d767115c4035a86c4fd30f1766b3 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/7de5d767115c4035a86c4fd30f1766b3 2024-11-23T06:39:50,341 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1439b4842b4041398de1d011155ce20e to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/1439b4842b4041398de1d011155ce20e 2024-11-23T06:39:50,342 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/f1a67b03c3a742b392080e18265aec39 to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/f1a67b03c3a742b392080e18265aec39 2024-11-23T06:39:50,343 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6122ce00ef1549bb8e92805fd49820bf to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/info/6122ce00ef1549bb8e92805fd49820bf 2024-11-23T06:39:50,343 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=df2f15951535:44159 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-23T06:39:50,343 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [602878de4f404edcae942151c1fed03b=42984, 4e2a662f63ba41a8a522f357083330b6=12516, 1a4a949c59954087a8dd760425712715=63636, bb04f312563d439883c08f7bae9fd689=17906, bbf0d1c6de5946a79719d6dee6dfa700=20078, e810c93a7950429685e04229284a5b57=90765, 9fe52524f47d4d088ab0975e4c5d17ba=16828, 739ad169584140ad8af95289cc7ffa34=21156, e62c42108d9b4a91a1e39ef4e0ed4018=114587, 1783204f1d5a4bf48d65d56b3292f22e=12516, c04f9a0b54334d2ea2713c46c9f0b581=19000, a4a19b19e6334d2bb8d3124b5cbd3805=147234, 6a7dada823934fca83cdce00fda0c855=23316, cbbca5d6ec304a97bdce538327553853=12523, 7de5d767115c4035a86c4fd30f1766b3=167841, 1439b4842b4041398de1d011155ce20e=17918, f1a67b03c3a742b392080e18265aec39=20092, 6122ce00ef1549bb8e92805fd49820bf=19013] 2024-11-23T06:39:50,347 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e0fca511df23d8fe0d1d186ec7439dbd/recovered.edits/342.seqid, newMaxSeqId=342, maxSeqId=130 2024-11-23T06:39:50,347 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 2024-11-23T06:39:50,347 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e0fca511df23d8fe0d1d186ec7439dbd: Waiting for close lock at 1732343990316Running coprocessor pre-close hooks at 1732343990316Disabling compacts and flushes for region at 1732343990316Disabling writes for close at 1732343990317 (+1 ms)Writing region close event to WAL at 1732343990344 (+27 ms)Running coprocessor post-close hooks at 1732343990347 (+3 ms)Closed at 1732343990347 2024-11-23T06:39:50,347 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732343957685.e0fca511df23d8fe0d1d186ec7439dbd. 
2024-11-23T06:39:50,347 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 23483c3a1a3b1df53f7352fa2f57ebb8, disabling compactions & flushes 2024-11-23T06:39:50,347 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. 2024-11-23T06:39:50,347 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. 2024-11-23T06:39:50,347 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. after waiting 0 ms 2024-11-23T06:39:50,347 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. 2024-11-23T06:39:50,348 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc->hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/e7784e49abc2b30af0cdcae90903a5dc/info/08c66d39bb4d435ab98f6fd06e5c588f-bottom] to archive 2024-11-23T06:39:50,348 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T06:39:50,349 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc to hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/archive/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/info/08c66d39bb4d435ab98f6fd06e5c588f.e7784e49abc2b30af0cdcae90903a5dc 2024-11-23T06:39:50,349 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-23T06:39:50,352 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/data/default/TestLogRolling-testLogRolling/23483c3a1a3b1df53f7352fa2f57ebb8/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-23T06:39:50,353 INFO [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. 
2024-11-23T06:39:50,353 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 23483c3a1a3b1df53f7352fa2f57ebb8: Waiting for close lock at 1732343990347Running coprocessor pre-close hooks at 1732343990347Disabling compacts and flushes for region at 1732343990347Disabling writes for close at 1732343990347Writing region close event to WAL at 1732343990350 (+3 ms)Running coprocessor post-close hooks at 1732343990352 (+2 ms)Closed at 1732343990352 2024-11-23T06:39:50,353 DEBUG [RS_CLOSE_REGION-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732343957685.23483c3a1a3b1df53f7352fa2f57ebb8. 2024-11-23T06:39:50,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:50,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:50,517 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(976): stopping server df2f15951535,34775,1732343941765; all regions closed. 
2024-11-23T06:39:50,519 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,519 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,519 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,520 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,520 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741834_1010 (size=8107) 2024-11-23T06:39:50,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741834_1010 (size=8107) 2024-11-23T06:39:50,530 DEBUG [RS:0;df2f15951535:34775 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/oldWALs 2024-11-23T06:39:50,530 INFO [RS:0;df2f15951535:34775 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C34775%2C1732343941765.meta:.meta(num 1732343943228) 2024-11-23T06:39:50,531 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,531 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,531 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,531 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,532 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741880_1056 (size=780) 2024-11-23T06:39:50,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741880_1056 (size=780) 2024-11-23T06:39:50,536 DEBUG [RS:0;df2f15951535:34775 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/oldWALs 2024-11-23T06:39:50,536 INFO [RS:0;df2f15951535:34775 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C34775%2C1732343941765:(num 1732343990237) 2024-11-23T06:39:50,536 DEBUG [RS:0;df2f15951535:34775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:50,536 INFO [RS:0;df2f15951535:34775 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:39:50,536 INFO [RS:0;df2f15951535:34775 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:39:50,536 INFO [RS:0;df2f15951535:34775 {}] hbase.ChoreService(370): Chore service for: regionserver/df2f15951535:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T06:39:50,536 INFO [RS:0;df2f15951535:34775 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:39:50,536 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T06:39:50,536 INFO [RS:0;df2f15951535:34775 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34775 2024-11-23T06:39:50,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:39:50,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/df2f15951535,34775,1732343941765 2024-11-23T06:39:50,549 INFO [RS:0;df2f15951535:34775 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:39:50,559 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [df2f15951535,34775,1732343941765] 2024-11-23T06:39:50,569 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/df2f15951535,34775,1732343941765 already deleted, retry=false 2024-11-23T06:39:50,569 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; df2f15951535,34775,1732343941765 expired; onlineServers=0 2024-11-23T06:39:50,569 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'df2f15951535,44159,1732343941568' ***** 2024-11-23T06:39:50,569 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T06:39:50,570 INFO [M:0;df2f15951535:44159 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:39:50,570 INFO [M:0;df2f15951535:44159 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:39:50,570 DEBUG [M:0;df2f15951535:44159 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T06:39:50,570 DEBUG [M:0;df2f15951535:44159 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T06:39:50,570 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-23T06:39:50,570 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343942553 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343942553,5,FailOnTimeoutGroup] 2024-11-23T06:39:50,570 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343942553 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343942553,5,FailOnTimeoutGroup] 2024-11-23T06:39:50,570 INFO [M:0;df2f15951535:44159 {}] hbase.ChoreService(370): Chore service for: master/df2f15951535:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T06:39:50,570 INFO [M:0;df2f15951535:44159 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:39:50,570 DEBUG [M:0;df2f15951535:44159 {}] master.HMaster(1795): Stopping service threads 2024-11-23T06:39:50,570 INFO [M:0;df2f15951535:44159 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T06:39:50,571 INFO [M:0;df2f15951535:44159 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:39:50,571 INFO [M:0;df2f15951535:44159 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T06:39:50,571 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T06:39:50,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T06:39:50,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:50,580 DEBUG [M:0;df2f15951535:44159 {}] zookeeper.ZKUtil(347): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T06:39:50,580 WARN [M:0;df2f15951535:44159 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T06:39:50,581 INFO [M:0;df2f15951535:44159 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/.lastflushedseqids 2024-11-23T06:39:50,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741881_1057 (size=228) 2024-11-23T06:39:50,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741881_1057 (size=228) 2024-11-23T06:39:50,590 INFO [M:0;df2f15951535:44159 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T06:39:50,590 INFO [M:0;df2f15951535:44159 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T06:39:50,590 DEBUG [M:0;df2f15951535:44159 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:39:50,590 INFO [M:0;df2f15951535:44159 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:50,590 DEBUG [M:0;df2f15951535:44159 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:50,590 DEBUG [M:0;df2f15951535:44159 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:39:50,590 DEBUG [M:0;df2f15951535:44159 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:50,591 INFO [M:0;df2f15951535:44159 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.36 KB 2024-11-23T06:39:50,608 DEBUG [M:0;df2f15951535:44159 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/df4b9fb4beb7435a9b0ffde848a4257d is 82, key is hbase:meta,,1/info:regioninfo/1732343943251/Put/seqid=0 2024-11-23T06:39:50,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741882_1058 (size=5672) 2024-11-23T06:39:50,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741882_1058 (size=5672) 2024-11-23T06:39:50,612 INFO [M:0;df2f15951535:44159 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/df4b9fb4beb7435a9b0ffde848a4257d 2024-11-23T06:39:50,629 DEBUG [M:0;df2f15951535:44159 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3b5fc31bfd54e9d801a38b460c735ec is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732343943786/Put/seqid=0 2024-11-23T06:39:50,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741883_1059 (size=7089) 2024-11-23T06:39:50,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741883_1059 (size=7089) 2024-11-23T06:39:50,634 INFO [M:0;df2f15951535:44159 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3b5fc31bfd54e9d801a38b460c735ec 2024-11-23T06:39:50,638 INFO [M:0;df2f15951535:44159 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f3b5fc31bfd54e9d801a38b460c735ec 2024-11-23T06:39:50,642 INFO [regionserver/df2f15951535:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:39:50,652 DEBUG [M:0;df2f15951535:44159 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a4322bcb0ab14936974b1d8db1745eb5 is 69, key is df2f15951535,34775,1732343941765/rs:state/1732343942620/Put/seqid=0 2024-11-23T06:39:50,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741884_1060 (size=5156) 2024-11-23T06:39:50,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741884_1060 (size=5156) 2024-11-23T06:39:50,657 INFO [M:0;df2f15951535:44159 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a4322bcb0ab14936974b1d8db1745eb5 2024-11-23T06:39:50,659 INFO [RS:0;df2f15951535:34775 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:39:50,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:39:50,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34775-0x101666ac55b0001, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:39:50,659 INFO [RS:0;df2f15951535:34775 {}] regionserver.HRegionServer(1031): Exiting; stopping=df2f15951535,34775,1732343941765; zookeeper connection closed. 2024-11-23T06:39:50,659 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@70ed0a34 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@70ed0a34 2024-11-23T06:39:50,659 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T06:39:50,674 DEBUG [M:0;df2f15951535:44159 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/83efefed118f4fefbe5bb249043fb01a is 52, key is load_balancer_on/state:d/1732343943402/Put/seqid=0 2024-11-23T06:39:50,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741885_1061 (size=5056) 2024-11-23T06:39:50,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741885_1061 (size=5056) 2024-11-23T06:39:50,678 INFO [M:0;df2f15951535:44159 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/83efefed118f4fefbe5bb249043fb01a 2024-11-23T06:39:50,682 DEBUG [M:0;df2f15951535:44159 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/df4b9fb4beb7435a9b0ffde848a4257d as 
hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/df4b9fb4beb7435a9b0ffde848a4257d 2024-11-23T06:39:50,685 INFO [M:0;df2f15951535:44159 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/df4b9fb4beb7435a9b0ffde848a4257d, entries=8, sequenceid=125, filesize=5.5 K 2024-11-23T06:39:50,686 DEBUG [M:0;df2f15951535:44159 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f3b5fc31bfd54e9d801a38b460c735ec as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f3b5fc31bfd54e9d801a38b460c735ec 2024-11-23T06:39:50,691 INFO [M:0;df2f15951535:44159 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f3b5fc31bfd54e9d801a38b460c735ec 2024-11-23T06:39:50,691 INFO [M:0;df2f15951535:44159 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f3b5fc31bfd54e9d801a38b460c735ec, entries=13, sequenceid=125, filesize=6.9 K 2024-11-23T06:39:50,692 DEBUG [M:0;df2f15951535:44159 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a4322bcb0ab14936974b1d8db1745eb5 as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a4322bcb0ab14936974b1d8db1745eb5 2024-11-23T06:39:50,695 INFO [M:0;df2f15951535:44159 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a4322bcb0ab14936974b1d8db1745eb5, entries=1, sequenceid=125, filesize=5.0 K 2024-11-23T06:39:50,696 DEBUG [M:0;df2f15951535:44159 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/83efefed118f4fefbe5bb249043fb01a as hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/83efefed118f4fefbe5bb249043fb01a 2024-11-23T06:39:50,700 INFO [M:0;df2f15951535:44159 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46567/user/jenkins/test-data/23bbc176-6577-ca20-82d8-f2924557f0e5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/83efefed118f4fefbe5bb249043fb01a, entries=1, sequenceid=125, filesize=4.9 K 2024-11-23T06:39:50,701 INFO [M:0;df2f15951535:44159 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=125, compaction requested=false 2024-11-23T06:39:50,702 INFO [M:0;df2f15951535:44159 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T06:39:50,702 DEBUG [M:0;df2f15951535:44159 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343990590Disabling compacts and flushes for region at 1732343990590Disabling writes for close at 1732343990590Obtaining lock to block concurrent updates at 1732343990591 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732343990591Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1732343990591Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732343990592 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732343990592Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732343990607 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732343990607Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732343990616 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732343990629 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732343990629Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732343990638 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732343990651 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732343990651Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732343990661 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732343990673 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732343990673Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cb5f18a: reopening flushed file at 1732343990681 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6137f037: reopening flushed file at 1732343990685 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e13f741: reopening flushed file at 1732343990691 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16e928d5: reopening flushed file at 1732343990696 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=125, compaction requested=false at 1732343990701 (+5 ms)Writing region close event to WAL at 1732343990702 (+1 ms)Closed at 1732343990702 2024-11-23T06:39:50,702 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,703 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,703 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,703 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,703 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:50,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41329 is added to blk_1073741830_1006 (size=61320) 2024-11-23T06:39:50,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46521 is added to blk_1073741830_1006 (size=61320) 2024-11-23T06:39:50,705 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T06:39:50,705 INFO [M:0;df2f15951535:44159 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T06:39:50,705 INFO [M:0;df2f15951535:44159 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44159 2024-11-23T06:39:50,705 INFO [M:0;df2f15951535:44159 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:39:50,817 INFO [M:0;df2f15951535:44159 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:39:50,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:39:50,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44159-0x101666ac55b0000, quorum=127.0.0.1:56103, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:39:50,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f81fda7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:39:50,822 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41c54b7e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:39:50,822 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:39:50,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@692ba77d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:39:50,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70aed17c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/hadoop.log.dir/,STOPPED} 2024-11-23T06:39:50,825 WARN [BP-1218950709-172.17.0.3-1732343939037 heartbeating to localhost/127.0.0.1:46567 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:39:50,825 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:39:50,826 WARN [BP-1218950709-172.17.0.3-1732343939037 heartbeating to localhost/127.0.0.1:46567 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1218950709-172.17.0.3-1732343939037 (Datanode Uuid 1df8e296-d8db-4b3f-9726-c0c67368dab9) service to localhost/127.0.0.1:46567 2024-11-23T06:39:50,826 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:39:50,826 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/data/data3/current/BP-1218950709-172.17.0.3-1732343939037 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:39:50,826 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/data/data4/current/BP-1218950709-172.17.0.3-1732343939037 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:39:50,827 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:39:50,829 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49059d65{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:39:50,829 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@417c1a7a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:39:50,829 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:39:50,829 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2001df3c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:39:50,829 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1724ca70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/hadoop.log.dir/,STOPPED} 2024-11-23T06:39:50,830 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:39:50,830 WARN [BP-1218950709-172.17.0.3-1732343939037 heartbeating to localhost/127.0.0.1:46567 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:39:50,830 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:39:50,830 WARN [BP-1218950709-172.17.0.3-1732343939037 heartbeating to localhost/127.0.0.1:46567 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1218950709-172.17.0.3-1732343939037 (Datanode Uuid fde251bd-1f37-478b-8f4e-c99e189d5067) service to localhost/127.0.0.1:46567 2024-11-23T06:39:50,831 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/data/data1/current/BP-1218950709-172.17.0.3-1732343939037 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:39:50,831 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/cluster_9e6fa8b5-112e-f34a-85a0-388b63bd01c0/data/data2/current/BP-1218950709-172.17.0.3-1732343939037 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:39:50,831 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:39:50,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e6f692{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:39:50,836 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45bda0cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:39:50,836 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:39:50,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75b3fca0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:39:50,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f8818bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/hadoop.log.dir/,STOPPED} 2024-11-23T06:39:50,843 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T06:39:50,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T06:39:50,880 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 207) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46567 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46567 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46567 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46567 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46567 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46567 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:46567 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46567 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=517 (was 486) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=94 (was 101), ProcessCount=11 (was 11), AvailableMemoryMB=7450 (was 7223) - AvailableMemoryMB LEAK? 
- 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=517, MaxFileDescriptor=1048576, SystemLoadAverage=94, ProcessCount=11, AvailableMemoryMB=7451 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/hadoop.log.dir so I do NOT create it in target/test-data/367d18eb-353a-66bd-7c03-489bfb138096 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/61166cb9-2035-71e0-5f9c-15075530574c/hadoop.tmp.dir so I do NOT create it in target/test-data/367d18eb-353a-66bd-7c03-489bfb138096 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c, deleteOnExit=true 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/test.cache.data in system properties and HBase conf 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/hadoop.log.dir in system properties and HBase conf 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T06:39:50,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T06:39:50,888 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/nfs.dump.dir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/java.io.tmpdir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T06:39:50,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T06:39:50,902 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:39:50,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:39:50,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T06:39:50,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T06:39:50,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-23T06:39:51,276 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:39:51,280 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:39:51,281 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:39:51,281 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:39:51,281 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:39:51,282 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:39:51,282 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d7e0513{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:39:51,282 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2191d18b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:39:51,375 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@87b2e2b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/java.io.tmpdir/jetty-localhost-36827-hadoop-hdfs-3_4_1-tests_jar-_-any-8458799589891487969/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:39:51,376 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e58a9be{HTTP/1.1, (http/1.1)}{localhost:36827} 2024-11-23T06:39:51,376 INFO [Time-limited test {}] server.Server(415): Started @297373ms 2024-11-23T06:39:51,387 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T06:39:51,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:51,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:51,875 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:39:51,878 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:39:51,878 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:39:51,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:39:51,879 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T06:39:51,879 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e687b61{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:39:51,879 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46761010{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:39:51,975 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7216654a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/java.io.tmpdir/jetty-localhost-44341-hadoop-hdfs-3_4_1-tests_jar-_-any-13577468562072923590/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:39:51,975 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@534394c4{HTTP/1.1, (http/1.1)}{localhost:44341} 2024-11-23T06:39:51,975 INFO [Time-limited test {}] server.Server(415): Started @297972ms 2024-11-23T06:39:51,976 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:39:52,003 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T06:39:52,005 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T06:39:52,006 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T06:39:52,006 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T06:39:52,006 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T06:39:52,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae1ce13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/hadoop.log.dir/,AVAILABLE} 2024-11-23T06:39:52,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@624ed4c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T06:39:52,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1187e2f5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/java.io.tmpdir/jetty-localhost-41325-hadoop-hdfs-3_4_1-tests_jar-_-any-8338166870363507438/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:39:52,104 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@219dec45{HTTP/1.1, (http/1.1)}{localhost:41325} 2024-11-23T06:39:52,104 INFO [Time-limited test {}] server.Server(415): Started @298101ms 2024-11-23T06:39:52,105 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T06:39:52,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:52,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:53,282 WARN [Thread-2491 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/data/data1/current/BP-1100694356-172.17.0.3-1732343990905/current, will proceed with Du for space computation calculation, 2024-11-23T06:39:53,282 WARN [Thread-2492 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/data/data2/current/BP-1100694356-172.17.0.3-1732343990905/current, will proceed with Du for space computation calculation, 2024-11-23T06:39:53,298 WARN [Thread-2455 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:39:53,300 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe494286261a79a2 with lease ID 0x26afa62a7fedae8: Processing first storage report for DS-9912dac1-3395-4981-91b2-7211ff88b896 from datanode DatanodeRegistration(127.0.0.1:36087, datanodeUuid=ca2f5264-7b7f-4f8c-abb9-17ed6b687235, infoPort=45161, infoSecurePort=0, ipcPort=32771, storageInfo=lv=-57;cid=testClusterID;nsid=805597368;c=1732343990905) 2024-11-23T06:39:53,300 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe494286261a79a2 with lease ID 0x26afa62a7fedae8: from storage DS-9912dac1-3395-4981-91b2-7211ff88b896 node DatanodeRegistration(127.0.0.1:36087, datanodeUuid=ca2f5264-7b7f-4f8c-abb9-17ed6b687235, infoPort=45161, infoSecurePort=0, ipcPort=32771, storageInfo=lv=-57;cid=testClusterID;nsid=805597368;c=1732343990905), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T06:39:53,300 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe494286261a79a2 with lease ID 0x26afa62a7fedae8: Processing first storage report for DS-699027cd-0a2b-4236-8d1b-8ad4f0c29379 from datanode DatanodeRegistration(127.0.0.1:36087, datanodeUuid=ca2f5264-7b7f-4f8c-abb9-17ed6b687235, infoPort=45161, infoSecurePort=0, ipcPort=32771, storageInfo=lv=-57;cid=testClusterID;nsid=805597368;c=1732343990905) 2024-11-23T06:39:53,300 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe494286261a79a2 with lease ID 0x26afa62a7fedae8: from storage DS-699027cd-0a2b-4236-8d1b-8ad4f0c29379 node DatanodeRegistration(127.0.0.1:36087, datanodeUuid=ca2f5264-7b7f-4f8c-abb9-17ed6b687235, infoPort=45161, infoSecurePort=0, ipcPort=32771, storageInfo=lv=-57;cid=testClusterID;nsid=805597368;c=1732343990905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:39:53,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:53,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:53,651 WARN [Thread-2502 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/data/data3/current/BP-1100694356-172.17.0.3-1732343990905/current, will proceed with Du for space computation calculation, 2024-11-23T06:39:53,651 WARN [Thread-2503 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/data/data4/current/BP-1100694356-172.17.0.3-1732343990905/current, will proceed with Du for space computation calculation, 2024-11-23T06:39:53,669 WARN [Thread-2478 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T06:39:53,670 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe9c63b44572cc133 with lease ID 0x26afa62a7fedae9: Processing first storage report for DS-424175d5-6b28-462f-8c76-f47a46b83615 from datanode DatanodeRegistration(127.0.0.1:41339, datanodeUuid=b37b30cc-5a57-48e3-87c8-5d87780644dc, infoPort=33545, infoSecurePort=0, ipcPort=35813, storageInfo=lv=-57;cid=testClusterID;nsid=805597368;c=1732343990905) 2024-11-23T06:39:53,670 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9c63b44572cc133 with lease ID 0x26afa62a7fedae9: from storage DS-424175d5-6b28-462f-8c76-f47a46b83615 node DatanodeRegistration(127.0.0.1:41339, datanodeUuid=b37b30cc-5a57-48e3-87c8-5d87780644dc, infoPort=33545, infoSecurePort=0, ipcPort=35813, storageInfo=lv=-57;cid=testClusterID;nsid=805597368;c=1732343990905), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:39:53,671 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe9c63b44572cc133 with lease ID 0x26afa62a7fedae9: Processing first storage report for DS-28d76524-5f58-4c69-a83e-0e6924061fca from datanode DatanodeRegistration(127.0.0.1:41339, datanodeUuid=b37b30cc-5a57-48e3-87c8-5d87780644dc, infoPort=33545, infoSecurePort=0, ipcPort=35813, storageInfo=lv=-57;cid=testClusterID;nsid=805597368;c=1732343990905) 2024-11-23T06:39:53,671 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9c63b44572cc133 with lease ID 0x26afa62a7fedae9: from storage DS-28d76524-5f58-4c69-a83e-0e6924061fca node DatanodeRegistration(127.0.0.1:41339, datanodeUuid=b37b30cc-5a57-48e3-87c8-5d87780644dc, infoPort=33545, infoSecurePort=0, ipcPort=35813, storageInfo=lv=-57;cid=testClusterID;nsid=805597368;c=1732343990905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T06:39:53,740 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096 2024-11-23T06:39:53,763 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/zookeeper_0, clientPort=55953, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T06:39:53,764 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55953 2024-11-23T06:39:53,765 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:53,766 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:53,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:39:53,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741825_1001 (size=7) 2024-11-23T06:39:53,777 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf with version=8 2024-11-23T06:39:53,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43385/user/jenkins/test-data/e7e5cbb5-fc65-fb80-892d-e0d92075a958/hbase-staging 2024-11-23T06:39:53,779 INFO [Time-limited test {}] client.ConnectionUtils(128): master/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:39:53,780 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:53,780 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:53,780 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:39:53,780 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:53,780 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:39:53,780 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T06:39:53,780 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:39:53,781 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41847 2024-11-23T06:39:53,782 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41847 connecting to ZooKeeper ensemble=127.0.0.1:55953 2024-11-23T06:39:53,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:418470x0, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:39:53,835 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41847-0x101666b914e0000 connected 2024-11-23T06:39:53,917 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:53,918 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:53,920 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:39:53,920 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf, hbase.cluster.distributed=false 2024-11-23T06:39:53,922 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:39:53,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41847 2024-11-23T06:39:53,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41847 2024-11-23T06:39:53,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41847 2024-11-23T06:39:53,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41847 2024-11-23T06:39:53,924 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41847 2024-11-23T06:39:53,940 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/df2f15951535:0 server-side Connection retries=45 2024-11-23T06:39:53,940 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:53,940 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:53,940 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T06:39:53,940 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T06:39:53,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T06:39:53,941 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T06:39:53,941 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T06:39:53,941 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33035 2024-11-23T06:39:53,942 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33035 connecting to ZooKeeper ensemble=127.0.0.1:55953 2024-11-23T06:39:53,942 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:53,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:53,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:330350x0, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T06:39:53,956 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33035-0x101666b914e0001 connected 2024-11-23T06:39:53,957 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:39:53,957 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T06:39:53,958 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T06:39:53,958 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T06:39:53,960 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T06:39:53,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33035 2024-11-23T06:39:53,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33035 2024-11-23T06:39:53,961 DEBUG [Time-limited 
test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33035 2024-11-23T06:39:53,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33035 2024-11-23T06:39:53,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33035 2024-11-23T06:39:53,975 DEBUG [M:0;df2f15951535:41847 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;df2f15951535:41847 2024-11-23T06:39:53,975 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/df2f15951535,41847,1732343993779 2024-11-23T06:39:53,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:39:53,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:39:53,988 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/df2f15951535,41847,1732343993779 2024-11-23T06:39:54,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T06:39:54,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,002 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T06:39:54,002 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/df2f15951535,41847,1732343993779 from backup master directory 2024-11-23T06:39:54,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/df2f15951535,41847,1732343993779 2024-11-23T06:39:54,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:39:54,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-23T06:39:54,011 WARN [master/df2f15951535:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T06:39:54,011 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=df2f15951535,41847,1732343993779 2024-11-23T06:39:54,018 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/hbase.id] with ID: 252c5981-4640-4bf9-a4e7-0e0ddb234c15 2024-11-23T06:39:54,018 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/.tmp/hbase.id 2024-11-23T06:39:54,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:39:54,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741826_1002 (size=42) 2024-11-23T06:39:54,028 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/.tmp/hbase.id]:[hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/hbase.id] 2024-11-23T06:39:54,042 INFO [master/df2f15951535:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:54,042 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T06:39:54,044 INFO [master/df2f15951535:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
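The records above show the master registering as active and writing the cluster ID file, with the test cluster's ZooKeeper ensemble at 127.0.0.1:55953. A minimal client-side sketch for pointing the standard HBase client at that same ensemble; this is not part of the test run, and the class name and the listTableNames() call are illustrative choices, not something the log shows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterSmokeCheck {
  public static void main(String[] args) throws Exception {
    // Same ZooKeeper ensemble the mini-cluster registered with in the records above.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "55953");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // On a freshly bootstrapped cluster the user-table list is empty.
      for (TableName tn : admin.listTableNames()) {
        System.out.println("table: " + tn);
      }
    }
  }
}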
2024-11-23T06:39:54,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:39:54,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741827_1003 (size=196) 2024-11-23T06:39:54,062 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T06:39:54,063 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T06:39:54,063 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:39:54,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:39:54,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741828_1004 (size=1189) 2024-11-23T06:39:54,072 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store 2024-11-23T06:39:54,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:39:54,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741829_1005 (size=34) 2024-11-23T06:39:54,078 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:54,078 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:39:54,078 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:54,078 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:54,079 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:39:54,079 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:54,079 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
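The 'master:store' descriptor logged above spells out per-family settings (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, IN_MEMORY). A rough sketch of how equivalent settings are expressed with the public descriptor builders; the table name example_store is a placeholder, and this only builds a descriptor, it does not recreate the master's local region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor exampleDescriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_store"))
        // Mirrors the logged 'info' family: 3 versions, in-memory, ROWCOL bloom,
        // ROW_INDEX_V1 block encoding, 8 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build())
        // Mirrors the logged 'proc' family: single version, ROW bloom, default block size.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }
}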
2024-11-23T06:39:54,079 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343994078Disabling compacts and flushes for region at 1732343994078Disabling writes for close at 1732343994079 (+1 ms)Writing region close event to WAL at 1732343994079Closed at 1732343994079 2024-11-23T06:39:54,079 WARN [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/.initializing 2024-11-23T06:39:54,079 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/WALs/df2f15951535,41847,1732343993779 2024-11-23T06:39:54,081 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C41847%2C1732343993779, suffix=, logDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/WALs/df2f15951535,41847,1732343993779, archiveDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/oldWALs, maxLogs=10 2024-11-23T06:39:54,081 INFO [master/df2f15951535:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C41847%2C1732343993779.1732343994081 2024-11-23T06:39:54,085 INFO [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/WALs/df2f15951535,41847,1732343993779/df2f15951535%2C41847%2C1732343993779.1732343994081 2024-11-23T06:39:54,086 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45161:45161),(127.0.0.1/127.0.0.1:33545:33545)] 2024-11-23T06:39:54,087 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:39:54,087 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:54,087 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,087 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T06:39:54,089 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:54,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T06:39:54,091 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:39:54,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T06:39:54,092 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:39:54,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T06:39:54,093 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,094 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T06:39:54,094 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,094 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,094 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,095 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,095 DEBUG [master/df2f15951535:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,096 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T06:39:54,096 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T06:39:54,102 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:39:54,102 INFO [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846738, jitterRate=0.07668320834636688}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T06:39:54,103 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732343994087Initializing all the Stores at 1732343994087Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343994088 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343994088Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343994088Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343994088Cleaning up temporary data from old regions at 1732343994095 (+7 ms)Region opened successfully at 1732343994103 (+8 ms) 2024-11-23T06:39:54,103 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T06:39:54,105 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55abcdbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:39:54,106 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T06:39:54,106 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T06:39:54,106 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T06:39:54,106 INFO [master/df2f15951535:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T06:39:54,107 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T06:39:54,107 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T06:39:54,107 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T06:39:54,110 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T06:39:54,110 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T06:39:54,116 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T06:39:54,116 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T06:39:54,117 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T06:39:54,127 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T06:39:54,127 INFO [master/df2f15951535:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T06:39:54,128 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T06:39:54,138 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T06:39:54,139 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T06:39:54,148 DEBUG 
[master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T06:39:54,152 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T06:39:54,158 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T06:39:54,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:39:54,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T06:39:54,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,170 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=df2f15951535,41847,1732343993779, sessionid=0x101666b914e0000, setting cluster-up flag (Was=false) 2024-11-23T06:39:54,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,222 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T06:39:54,223 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,41847,1732343993779 2024-11-23T06:39:54,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,275 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T06:39:54,277 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=df2f15951535,41847,1732343993779 2024-11-23T06:39:54,279 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T06:39:54,282 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T06:39:54,282 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T06:39:54,283 INFO [master/df2f15951535:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T06:39:54,283 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: df2f15951535,41847,1732343993779 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T06:39:54,285 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:39:54,285 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:39:54,285 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:39:54,285 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/df2f15951535:0, corePoolSize=5, maxPoolSize=5 2024-11-23T06:39:54,285 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/df2f15951535:0, corePoolSize=10, maxPoolSize=10 2024-11-23T06:39:54,285 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,285 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:39:54,285 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/df2f15951535:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T06:39:54,286 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732344024286 2024-11-23T06:39:54,286 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T06:39:54,286 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T06:39:54,286 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T06:39:54,286 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T06:39:54,286 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T06:39:54,286 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T06:39:54,286 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:39:54,286 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T06:39:54,287 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,287 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T06:39:54,287 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T06:39:54,287 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T06:39:54,287 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T06:39:54,287 INFO [master/df2f15951535:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T06:39:54,288 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343994287,5,FailOnTimeoutGroup] 2024-11-23T06:39:54,288 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,288 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343994288,5,FailOnTimeoutGroup] 2024-11-23T06:39:54,288 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-23T06:39:54,288 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T06:39:54,288 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,288 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,288 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T06:39:54,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:39:54,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741831_1007 (size=1321) 2024-11-23T06:39:54,293 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T06:39:54,293 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf 2024-11-23T06:39:54,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:39:54,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741832_1008 (size=32) 2024-11-23T06:39:54,300 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:54,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:39:54,302 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:39:54,302 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:54,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-23T06:39:54,303 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:39:54,303 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:54,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:39:54,305 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:39:54,305 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:54,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:39:54,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:39:54,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:54,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:39:54,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740 2024-11-23T06:39:54,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740 2024-11-23T06:39:54,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:39:54,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:39:54,310 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
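The FlushLargeStoresPolicy record above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the hbase:meta descriptor, so the policy falls back to the memstore flush size divided by the number of families (16 MB here). A hedged sketch of setting that key explicitly on a table descriptor; the table name, family name, and 16 MB value are illustrative only.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushPolicySketch {
  public static TableDescriptor withFlushLowerBound() {
    // The key is the one named in the log record; the value (16 MB) is just an example.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
  }
}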
2024-11-23T06:39:54,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:39:54,313 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T06:39:54,314 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693192, jitterRate=-0.11856165528297424}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:39:54,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732343994300Initializing all the Stores at 1732343994301 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343994301Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343994301Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343994301Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343994301Cleaning up temporary data from old regions at 1732343994309 (+8 ms)Region opened successfully at 1732343994315 (+6 ms) 2024-11-23T06:39:54,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:39:54,315 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:39:54,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:39:54,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:39:54,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:39:54,315 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:39:54,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343994315Disabling compacts and flushes for region at 1732343994315Disabling writes for close at 1732343994315Writing region close 
event to WAL at 1732343994315Closed at 1732343994315 2024-11-23T06:39:54,317 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:39:54,317 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T06:39:54,317 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T06:39:54,318 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:39:54,319 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T06:39:54,365 INFO [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(746): ClusterId : 252c5981-4640-4bf9-a4e7-0e0ddb234c15 2024-11-23T06:39:54,365 DEBUG [RS:0;df2f15951535:33035 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T06:39:54,380 DEBUG [RS:0;df2f15951535:33035 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T06:39:54,380 DEBUG [RS:0;df2f15951535:33035 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T06:39:54,393 DEBUG [RS:0;df2f15951535:33035 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T06:39:54,394 DEBUG [RS:0;df2f15951535:33035 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e350519, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=df2f15951535/172.17.0.3:0 2024-11-23T06:39:54,411 DEBUG [RS:0;df2f15951535:33035 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;df2f15951535:33035 2024-11-23T06:39:54,411 INFO [RS:0;df2f15951535:33035 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T06:39:54,411 INFO [RS:0;df2f15951535:33035 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T06:39:54,411 DEBUG [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(832): About to register with Master. 
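The ZooKeeper activity in this section revolves around a handful of znodes under the /hbase base node: /hbase/running, /hbase/master, /hbase/backup-masters, and the ephemeral region-server entries under /hbase/rs. A small probe sketch using the plain ZooKeeper client, with the ensemble address and paths taken from the log; it only checks existence and children (the node contents are serialized by HBase and are not parsed here), and the class name is illustrative.

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeProbe {
  public static void main(String[] args) throws Exception {
    // Ensemble address and znode paths come from the log records above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55953", 30_000, event -> { });
    try {
      System.out.println("/hbase/running exists: " + (zk.exists("/hbase/running", false) != null));
      System.out.println("/hbase/master exists:  " + (zk.exists("/hbase/master", false) != null));
      // Each live region server registers an ephemeral child under /hbase/rs.
      List<String> servers = zk.getChildren("/hbase/rs", false);
      System.out.println("region servers: " + servers);
    } finally {
      zk.close();
    }
  }
}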
2024-11-23T06:39:54,412 INFO [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(2659): reportForDuty to master=df2f15951535,41847,1732343993779 with port=33035, startcode=1732343993940 2024-11-23T06:39:54,412 DEBUG [RS:0;df2f15951535:33035 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T06:39:54,414 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47767, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T06:39:54,415 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41847 {}] master.ServerManager(363): Checking decommissioned status of RegionServer df2f15951535,33035,1732343993940 2024-11-23T06:39:54,415 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41847 {}] master.ServerManager(517): Registering regionserver=df2f15951535,33035,1732343993940 2024-11-23T06:39:54,416 DEBUG [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf 2024-11-23T06:39:54,416 DEBUG [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40021 2024-11-23T06:39:54,416 DEBUG [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T06:39:54,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:39:54,422 DEBUG [RS:0;df2f15951535:33035 {}] zookeeper.ZKUtil(111): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/df2f15951535,33035,1732343993940 2024-11-23T06:39:54,422 WARN [RS:0;df2f15951535:33035 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T06:39:54,422 INFO [RS:0;df2f15951535:33035 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:39:54,422 DEBUG [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/df2f15951535,33035,1732343993940 2024-11-23T06:39:54,422 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [df2f15951535,33035,1732343993940] 2024-11-23T06:39:54,426 INFO [RS:0;df2f15951535:33035 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T06:39:54,427 INFO [RS:0;df2f15951535:33035 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T06:39:54,427 INFO [RS:0;df2f15951535:33035 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T06:39:54,427 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T06:39:54,428 INFO [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T06:39:54,428 INFO [RS:0;df2f15951535:33035 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T06:39:54,428 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,428 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,428 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,428 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/df2f15951535:0, corePoolSize=2, maxPoolSize=2 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/df2f15951535:0, corePoolSize=1, maxPoolSize=1 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:39:54,429 DEBUG [RS:0;df2f15951535:33035 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/df2f15951535:0, corePoolSize=3, maxPoolSize=3 2024-11-23T06:39:54,429 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
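Each of the RS_* executor services above is created with corePoolSize equal to maxPoolSize, i.e. a fixed-size thread pool dedicated to a single event type. A minimal sketch of that pattern with plain java.util.concurrent (the pool name is copied from the log only for illustration; this is not HBase's internal ExecutorService class):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public final class FixedEventPool {
        // Builds a fixed-size pool (core == max, as in the log) with named threads.
        static ThreadPoolExecutor newPool(String name, int poolSize) {
            AtomicInteger counter = new AtomicInteger();
            return new ThreadPoolExecutor(
                poolSize, poolSize,                  // corePoolSize == maxPoolSize
                60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),         // unbounded handler queue
                r -> new Thread(r, name + "-" + counter.incrementAndGet()));
        }

        public static void main(String[] args) {
            ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1);
            openRegion.execute(() ->
                System.out.println(Thread.currentThread().getName() + " handling open-region event"));
            openRegion.shutdown();
        }
    }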
2024-11-23T06:39:54,429 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,429 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,429 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,429 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,429 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,33035,1732343993940-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:39:54,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:54,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:54,447 INFO [RS:0;df2f15951535:33035 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T06:39:54,447 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,33035,1732343993940-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,447 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,447 INFO [RS:0;df2f15951535:33035 {}] regionserver.Replication(171): df2f15951535,33035,1732343993940 started 2024-11-23T06:39:54,459 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
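The two WARN stack traces above come from RecoverLeaseFSUtils invoking isFileClosed reflectively; because the underlying DFSClient has already been closed, the call surfaces as an InvocationTargetException whose cause is the "Filesystem closed" IOException. A rough sketch of that reflective call-and-unwrap shape (class and method names here are illustrative, not the actual RecoverLeaseFSUtils code; it assumes hadoop-common on the classpath):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
        // Calls fs.isFileClosed(path) via reflection, since not every FileSystem exposes the method.
        static boolean isFileClosed(FileSystem fs, Path path) {
            try {
                Method m = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) m.invoke(fs, path);
            } catch (NoSuchMethodException e) {
                return false;                  // method not available on this FileSystem
            } catch (InvocationTargetException e) {
                // The real failure (e.g. "Filesystem closed") is the wrapped cause, as in the log.
                System.err.println("isFileClosed failed: " + e.getCause());
                return false;
            } catch (IllegalAccessException e) {
                return false;
            }
        }
    }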
2024-11-23T06:39:54,459 INFO [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(1482): Serving as df2f15951535,33035,1732343993940, RpcServer on df2f15951535/172.17.0.3:33035, sessionid=0x101666b914e0001 2024-11-23T06:39:54,459 DEBUG [RS:0;df2f15951535:33035 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T06:39:54,459 DEBUG [RS:0;df2f15951535:33035 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager df2f15951535,33035,1732343993940 2024-11-23T06:39:54,459 DEBUG [RS:0;df2f15951535:33035 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,33035,1732343993940' 2024-11-23T06:39:54,459 DEBUG [RS:0;df2f15951535:33035 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T06:39:54,460 DEBUG [RS:0;df2f15951535:33035 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T06:39:54,461 DEBUG [RS:0;df2f15951535:33035 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T06:39:54,461 DEBUG [RS:0;df2f15951535:33035 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T06:39:54,461 DEBUG [RS:0;df2f15951535:33035 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager df2f15951535,33035,1732343993940 2024-11-23T06:39:54,461 DEBUG [RS:0;df2f15951535:33035 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'df2f15951535,33035,1732343993940' 2024-11-23T06:39:54,461 DEBUG [RS:0;df2f15951535:33035 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T06:39:54,461 DEBUG [RS:0;df2f15951535:33035 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T06:39:54,462 DEBUG [RS:0;df2f15951535:33035 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T06:39:54,462 INFO [RS:0;df2f15951535:33035 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T06:39:54,462 INFO [RS:0;df2f15951535:33035 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T06:39:54,469 WARN [df2f15951535:41847 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-23T06:39:54,565 INFO [RS:0;df2f15951535:33035 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C33035%2C1732343993940, suffix=, logDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/df2f15951535,33035,1732343993940, archiveDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/oldWALs, maxLogs=32 2024-11-23T06:39:54,566 INFO [RS:0;df2f15951535:33035 {}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C33035%2C1732343993940.1732343994566 2024-11-23T06:39:54,576 INFO [RS:0;df2f15951535:33035 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/df2f15951535,33035,1732343993940/df2f15951535%2C33035%2C1732343993940.1732343994566 2024-11-23T06:39:54,577 DEBUG [RS:0;df2f15951535:33035 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33545:33545),(127.0.0.1/127.0.0.1:45161:45161)] 2024-11-23T06:39:54,719 DEBUG [df2f15951535:41847 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T06:39:54,720 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=df2f15951535,33035,1732343993940 2024-11-23T06:39:54,721 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,33035,1732343993940, state=OPENING 2024-11-23T06:39:54,727 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T06:39:54,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:54,738 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:39:54,738 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:39:54,738 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T06:39:54,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,33035,1732343993940}] 2024-11-23T06:39:54,893 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T06:39:54,896 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52199, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T06:39:54,902 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T06:39:54,902 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:39:54,904 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=df2f15951535%2C33035%2C1732343993940.meta, suffix=.meta, logDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/df2f15951535,33035,1732343993940, archiveDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/oldWALs, maxLogs=32 2024-11-23T06:39:54,905 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor df2f15951535%2C33035%2C1732343993940.meta.1732343994904.meta 2024-11-23T06:39:54,909 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/df2f15951535,33035,1732343993940/df2f15951535%2C33035%2C1732343993940.meta.1732343994904.meta 2024-11-23T06:39:54,912 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33545:33545),(127.0.0.1/127.0.0.1:45161:45161)] 2024-11-23T06:39:54,916 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T06:39:54,917 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T06:39:54,917 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T06:39:54,917 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
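The meta WAL above is configured with blocksize=256 MB and rollsize=128 MB, i.e. the roll size is half the block size. A one-line arithmetic check of that relationship (the 0.5 multiplier is inferred from the two logged values, not read from any configuration key):

    public final class WalRollSize {
        public static void main(String[] args) {
            long blockSizeBytes = 256L * 1024 * 1024;   // blocksize=256 MB from the log
            double multiplier = 0.5;                    // inferred: 128 MB / 256 MB
            long rollSizeBytes = (long) (blockSizeBytes * multiplier);
            System.out.println("rollsize = " + (rollSizeBytes / (1024 * 1024)) + " MB"); // prints 128 MB
        }
    }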
2024-11-23T06:39:54,917 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T06:39:54,917 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T06:39:54,917 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T06:39:54,917 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T06:39:54,918 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T06:39:54,919 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T06:39:54,919 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:54,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T06:39:54,920 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T06:39:54,920 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,921 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:54,921 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T06:39:54,921 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T06:39:54,921 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T06:39:54,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T06:39:54,922 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T06:39:54,922 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T06:39:54,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
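Each store above is created with the same CompactionConfiguration: ratio 1.200000, minFilesToCompact 3, maxFilesToCompact 10. A much-simplified sketch of ratio-based file selection using those numbers; it only illustrates the core ratio test (a candidate is kept when it is not too large relative to the newer files it would be compacted with) and is not the actual ExploringCompactionPolicy:

    import java.util.ArrayList;
    import java.util.List;

    public final class RatioSelectionSketch {
        // Drops oversized old files until the oldest remaining file satisfies the ratio rule.
        static List<Long> select(List<Long> sizesOldestFirst, double ratio, int minFiles, int maxFiles) {
            List<Long> picked = new ArrayList<>(sizesOldestFirst);
            while (picked.size() > maxFiles) {
                picked.remove(0);                           // respect maxFilesToCompact
            }
            while (!picked.isEmpty()) {
                long first = picked.get(0);
                long restSum = picked.stream().skip(1).mapToLong(Long::longValue).sum();
                if (first <= restSum * ratio) {
                    break;                                  // oldest file fits the ratio rule
                }
                picked.remove(0);                           // otherwise exclude it and retry
            }
            if (picked.size() < minFiles) {
                picked.clear();                             // not enough files to bother compacting
            }
            return picked;
        }

        public static void main(String[] args) {
            // ratio=1.2, minFilesToCompact=3, maxFilesToCompact=10, as logged above.
            System.out.println(select(List.of(500L, 40L, 30L, 20L, 10L), 1.2, 3, 10)); // [40, 30, 20, 10]
        }
    }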
2024-11-23T06:39:54,923 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T06:39:54,923 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740 2024-11-23T06:39:54,924 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740 2024-11-23T06:39:54,926 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T06:39:54,926 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T06:39:54,926 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T06:39:54,927 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T06:39:54,928 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766975, jitterRate=-0.024741679430007935}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T06:39:54,928 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T06:39:54,928 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732343994917Writing region info on filesystem at 1732343994917Initializing all the Stores at 1732343994918 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343994918Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343994918Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732343994918Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732343994918Cleaning up temporary data from old regions at 1732343994926 (+8 ms)Running coprocessor post-open hooks at 1732343994928 (+2 ms)Region opened successfully at 1732343994928 2024-11-23T06:39:54,929 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732343994892 2024-11-23T06:39:54,931 DEBUG [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T06:39:54,931 INFO [RS_OPEN_META-regionserver/df2f15951535:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T06:39:54,932 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=df2f15951535,33035,1732343993940 2024-11-23T06:39:54,933 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as df2f15951535,33035,1732343993940, state=OPEN 2024-11-23T06:39:54,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:39:54,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T06:39:54,975 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=df2f15951535,33035,1732343993940 2024-11-23T06:39:54,975 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:39:54,975 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T06:39:54,980 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T06:39:54,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=df2f15951535,33035,1732343993940 in 237 msec 2024-11-23T06:39:54,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T06:39:54,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 664 msec 2024-11-23T06:39:54,986 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T06:39:54,986 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T06:39:54,988 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:39:54,988 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,33035,1732343993940, seqNum=-1] 2024-11-23T06:39:54,988 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:39:54,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56273, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:39:54,995 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 713 msec 2024-11-23T06:39:54,995 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732343994995, completionTime=-1 2024-11-23T06:39:54,995 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T06:39:54,995 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T06:39:54,997 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T06:39:54,997 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732344054997 2024-11-23T06:39:54,997 INFO [master/df2f15951535:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732344114997 2024-11-23T06:39:54,997 INFO [master/df2f15951535:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-23T06:39:54,997 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41847,1732343993779-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,997 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41847,1732343993779-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,997 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41847,1732343993779-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,997 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-df2f15951535:41847, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T06:39:54,997 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,998 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T06:39:54,999 DEBUG [master/df2f15951535:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T06:39:55,001 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.990sec 2024-11-23T06:39:55,001 INFO [master/df2f15951535:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T06:39:55,001 INFO [master/df2f15951535:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T06:39:55,001 INFO [master/df2f15951535:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T06:39:55,001 INFO [master/df2f15951535:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T06:39:55,002 INFO [master/df2f15951535:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T06:39:55,002 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41847,1732343993779-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T06:39:55,002 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41847,1732343993779-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T06:39:55,004 DEBUG [master/df2f15951535:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T06:39:55,004 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T06:39:55,004 INFO [master/df2f15951535:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=df2f15951535,41847,1732343993779-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
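Every "ScheduledChore ... is enabled" entry above registers a periodic task with the ChoreService; functionally this amounts to fixed-rate scheduling. A minimal JDK-only sketch using the name and period from the BalancerChore entry (this is not HBase's ChoreService API):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public final class ChoreSketch {
        public static void main(String[] args) {
            ScheduledExecutorService chorePool =
                Executors.newScheduledThreadPool(1, r -> new Thread(r, "BalancerChore"));
            // period=300000, unit=MILLISECONDS, as in the BalancerChore entry above.
            chorePool.scheduleAtFixedRate(
                () -> System.out.println("running balancer chore"),
                300_000, 300_000, TimeUnit.MILLISECONDS);
        }
    }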
2024-11-23T06:39:55,065 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46ae05a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:39:55,065 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request df2f15951535,41847,-1 for getting cluster id 2024-11-23T06:39:55,065 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T06:39:55,067 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '252c5981-4640-4bf9-a4e7-0e0ddb234c15' 2024-11-23T06:39:55,068 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T06:39:55,068 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "252c5981-4640-4bf9-a4e7-0e0ddb234c15" 2024-11-23T06:39:55,068 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@696bb5d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:39:55,069 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [df2f15951535,41847,-1] 2024-11-23T06:39:55,069 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T06:39:55,069 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:55,070 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36078, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T06:39:55,072 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@719b7db0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T06:39:55,072 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T06:39:55,073 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=df2f15951535,33035,1732343993940, seqNum=-1] 2024-11-23T06:39:55,074 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T06:39:55,075 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34720, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T06:39:55,077 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=df2f15951535,41847,1732343993779 2024-11-23T06:39:55,077 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T06:39:55,080 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T06:39:55,080 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T06:39:55,083 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/test.com,8080,1, archiveDir=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/oldWALs, maxLogs=32 2024-11-23T06:39:55,084 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732343995083 2024-11-23T06:39:55,090 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/test.com,8080,1/test.com%2C8080%2C1.1732343995083 2024-11-23T06:39:55,092 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45161:45161),(127.0.0.1/127.0.0.1:33545:33545)] 2024-11-23T06:39:55,093 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732343995093 2024-11-23T06:39:55,097 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,097 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,097 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,098 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,098 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,098 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/test.com,8080,1/test.com%2C8080%2C1.1732343995083 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/test.com,8080,1/test.com%2C8080%2C1.1732343995093 2024-11-23T06:39:55,098 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33545:33545),(127.0.0.1/127.0.0.1:45161:45161)] 2024-11-23T06:39:55,098 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/test.com,8080,1/test.com%2C8080%2C1.1732343995083 is not closed yet, will try archiving it next time 2024-11-23T06:39:55,099 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,099 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,099 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,099 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741835_1011 (size=93) 2024-11-23T06:39:55,099 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741835_1011 (size=93) 2024-11-23T06:39:55,100 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/WALs/test.com,8080,1/test.com%2C8080%2C1.1732343995083 to hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/oldWALs/test.com%2C8080%2C1.1732343995083 2024-11-23T06:39:55,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741836_1012 (size=93) 2024-11-23T06:39:55,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741836_1012 (size=93) 2024-11-23T06:39:55,102 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/oldWALs 2024-11-23T06:39:55,102 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732343995093) 2024-11-23T06:39:55,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T06:39:55,102 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T06:39:55,102 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:39:55,102 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:55,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:55,103 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T06:39:55,103 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T06:39:55,103 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2137242379, stopped=false 2024-11-23T06:39:55,103 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=df2f15951535,41847,1732343993779 2024-11-23T06:39:55,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:39:55,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T06:39:55,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:55,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:55,125 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:39:55,125 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
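The shutdown above is signalled through ZooKeeper: removing /hbase/running produces the NodeDeleted events that both the master and region server watchers receive. A small sketch of that watch/re-watch pattern with the plain ZooKeeper client (the quorum address is the one in the log; the class is illustrative, not HBase's ZKWatcher):

    import java.io.IOException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public final class RunningNodeWatcher implements Watcher {
        private final ZooKeeper zk;

        RunningNodeWatcher(String quorum) throws IOException {
            // quorum string from the log (127.0.0.1:55953); the session timeout is arbitrary here.
            this.zk = new ZooKeeper(quorum, 30_000, this);
        }

        void watchRunningNode() throws Exception {
            // Registers a watch whether or not /hbase/running currently exists.
            zk.exists("/hbase/running", this);
        }

        @Override
        public void process(WatchedEvent event) {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && "/hbase/running".equals(event.getPath())) {
                System.out.println("cluster shutdown requested");  // react to the deletion
            }
            try {
                watchRunningNode();                                 // one-shot watches must be re-set
            } catch (Exception e) {
                // ignored in this sketch
            }
        }
    }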
2024-11-23T06:39:55,125 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:39:55,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:55,125 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:39:55,125 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T06:39:55,125 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'df2f15951535,33035,1732343993940' ***** 2024-11-23T06:39:55,125 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T06:39:55,126 INFO [RS:0;df2f15951535:33035 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T06:39:55,126 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T06:39:55,126 INFO [RS:0;df2f15951535:33035 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T06:39:55,126 INFO [RS:0;df2f15951535:33035 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T06:39:55,126 INFO [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(959): stopping server df2f15951535,33035,1732343993940 2024-11-23T06:39:55,126 INFO [RS:0;df2f15951535:33035 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:39:55,126 INFO [RS:0;df2f15951535:33035 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;df2f15951535:33035. 2024-11-23T06:39:55,126 DEBUG [RS:0;df2f15951535:33035 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T06:39:55,126 DEBUG [RS:0;df2f15951535:33035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:55,126 INFO [RS:0;df2f15951535:33035 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T06:39:55,126 INFO [RS:0;df2f15951535:33035 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T06:39:55,126 INFO [RS:0;df2f15951535:33035 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-23T06:39:55,127 INFO [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T06:39:55,127 INFO [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T06:39:55,127 DEBUG [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-23T06:39:55,127 DEBUG [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-23T06:39:55,127 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T06:39:55,127 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T06:39:55,127 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T06:39:55,127 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T06:39:55,127 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T06:39:55,127 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-23T06:39:55,145 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740/.tmp/ns/93c13b963c3044938c4ef10281c0b45b is 43, key is default/ns:d/1732343994990/Put/seqid=0 2024-11-23T06:39:55,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741837_1013 (size=5153) 2024-11-23T06:39:55,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741837_1013 (size=5153) 2024-11-23T06:39:55,150 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740/.tmp/ns/93c13b963c3044938c4ef10281c0b45b 2024-11-23T06:39:55,154 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740/.tmp/ns/93c13b963c3044938c4ef10281c0b45b as hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740/ns/93c13b963c3044938c4ef10281c0b45b 2024-11-23T06:39:55,158 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740/ns/93c13b963c3044938c4ef10281c0b45b, entries=2, sequenceid=6, filesize=5.0 K 2024-11-23T06:39:55,159 INFO 
[RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false 2024-11-23T06:39:55,159 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T06:39:55,163 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T06:39:55,163 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T06:39:55,163 INFO [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T06:39:55,163 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732343995127Running coprocessor pre-close hooks at 1732343995127Disabling compacts and flushes for region at 1732343995127Disabling writes for close at 1732343995127Obtaining lock to block concurrent updates at 1732343995127Preparing flush snapshotting stores in 1588230740 at 1732343995127Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732343995128 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732343995129 (+1 ms)Flushing 1588230740/ns: creating writer at 1732343995129Flushing 1588230740/ns: appending metadata at 1732343995145 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732343995145Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f6742fe: reopening flushed file at 1732343995154 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false at 1732343995159 (+5 ms)Writing region close event to WAL at 1732343995160 (+1 ms)Running coprocessor post-close hooks at 1732343995163 (+3 ms)Closed at 1732343995163 2024-11-23T06:39:55,164 DEBUG [RS_CLOSE_META-regionserver/df2f15951535:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T06:39:55,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,327 INFO [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(976): stopping server df2f15951535,33035,1732343993940; all regions closed. 2024-11-23T06:39:55,328 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,328 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,328 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,328 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,328 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741834_1010 (size=1152) 2024-11-23T06:39:55,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741834_1010 (size=1152) 2024-11-23T06:39:55,331 DEBUG [RS:0;df2f15951535:33035 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/oldWALs 2024-11-23T06:39:55,331 INFO [RS:0;df2f15951535:33035 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C33035%2C1732343993940.meta:.meta(num 1732343994904) 2024-11-23T06:39:55,331 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,331 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,332 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,332 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,332 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741833_1009 (size=93) 2024-11-23T06:39:55,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741833_1009 (size=93) 2024-11-23T06:39:55,335 DEBUG [RS:0;df2f15951535:33035 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/oldWALs 2024-11-23T06:39:55,335 INFO [RS:0;df2f15951535:33035 {}] 
wal.AbstractFSWAL(1259): Closed WAL: FSHLog df2f15951535%2C33035%2C1732343993940:(num 1732343994566) 2024-11-23T06:39:55,335 DEBUG [RS:0;df2f15951535:33035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T06:39:55,335 INFO [RS:0;df2f15951535:33035 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T06:39:55,335 INFO [RS:0;df2f15951535:33035 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:39:55,335 INFO [RS:0;df2f15951535:33035 {}] hbase.ChoreService(370): Chore service for: regionserver/df2f15951535:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T06:39:55,335 INFO [RS:0;df2f15951535:33035 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:39:55,335 INFO [regionserver/df2f15951535:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T06:39:55,336 INFO [RS:0;df2f15951535:33035 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33035 2024-11-23T06:39:55,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/df2f15951535,33035,1732343993940 2024-11-23T06:39:55,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T06:39:55,348 INFO [RS:0;df2f15951535:33035 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:39:55,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T06:39:55,359 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [df2f15951535,33035,1732343993940] 2024-11-23T06:39:55,369 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/df2f15951535,33035,1732343993940 already deleted, retry=false 2024-11-23T06:39:55,369 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; df2f15951535,33035,1732343993940 expired; onlineServers=0 2024-11-23T06:39:55,369 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'df2f15951535,41847,1732343993779' ***** 2024-11-23T06:39:55,369 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T06:39:55,369 INFO [M:0;df2f15951535:41847 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T06:39:55,369 INFO [M:0;df2f15951535:41847 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T06:39:55,369 DEBUG [M:0;df2f15951535:41847 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T06:39:55,369 DEBUG [M:0;df2f15951535:41847 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T06:39:55,369 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-23T06:39:55,369 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343994287 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.large.0-1732343994287,5,FailOnTimeoutGroup] 2024-11-23T06:39:55,369 DEBUG [master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343994288 {}] cleaner.HFileCleaner(306): Exit Thread[master/df2f15951535:0:becomeActiveMaster-HFileCleaner.small.0-1732343994288,5,FailOnTimeoutGroup] 2024-11-23T06:39:55,369 INFO [M:0;df2f15951535:41847 {}] hbase.ChoreService(370): Chore service for: master/df2f15951535:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T06:39:55,370 INFO [M:0;df2f15951535:41847 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T06:39:55,370 DEBUG [M:0;df2f15951535:41847 {}] master.HMaster(1795): Stopping service threads 2024-11-23T06:39:55,370 INFO [M:0;df2f15951535:41847 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T06:39:55,370 INFO [M:0;df2f15951535:41847 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T06:39:55,370 INFO [M:0;df2f15951535:41847 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T06:39:55,370 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-23T06:39:55,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T06:39:55,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T06:39:55,380 DEBUG [M:0;df2f15951535:41847 {}] zookeeper.ZKUtil(347): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T06:39:55,380 WARN [M:0;df2f15951535:41847 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T06:39:55,380 INFO [M:0;df2f15951535:41847 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/.lastflushedseqids 2024-11-23T06:39:55,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741838_1014 (size=99) 2024-11-23T06:39:55,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741838_1014 (size=99) 2024-11-23T06:39:55,386 INFO [M:0;df2f15951535:41847 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T06:39:55,386 INFO [M:0;df2f15951535:41847 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T06:39:55,386 DEBUG [M:0;df2f15951535:41847 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T06:39:55,386 INFO [M:0;df2f15951535:41847 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:55,386 DEBUG [M:0;df2f15951535:41847 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:55,386 DEBUG [M:0;df2f15951535:41847 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T06:39:55,386 DEBUG [M:0;df2f15951535:41847 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T06:39:55,387 INFO [M:0;df2f15951535:41847 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-23T06:39:55,401 DEBUG [M:0;df2f15951535:41847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5e7690c16bac40bbaec45f22defdd7fc is 82, key is hbase:meta,,1/info:regioninfo/1732343994932/Put/seqid=0 2024-11-23T06:39:55,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741839_1015 (size=5672) 2024-11-23T06:39:55,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741839_1015 (size=5672) 2024-11-23T06:39:55,407 INFO [M:0;df2f15951535:41847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5e7690c16bac40bbaec45f22defdd7fc 2024-11-23T06:39:55,424 DEBUG [M:0;df2f15951535:41847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1309dd8fdfb54c85b56ab493017571e6 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732343994994/Put/seqid=0 2024-11-23T06:39:55,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741840_1016 (size=5275) 2024-11-23T06:39:55,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741840_1016 (size=5275) 2024-11-23T06:39:55,429 INFO [M:0;df2f15951535:41847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1309dd8fdfb54c85b56ab493017571e6 2024-11-23T06:39:55,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,44895,1732343801767/df2f15951535%2C44895%2C1732343801767.1732343802004 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T06:39:55,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41685/user/jenkins/test-data/16ab0067-3794-c857-db6c-9d1e29a509e8/WALs/df2f15951535,41671,1732343800399/df2f15951535%2C41671%2C1732343800399.meta.1732343801471.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T06:39:55,448 DEBUG [M:0;df2f15951535:41847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f439918299f9422a9092b0af6098baee is 69, key is df2f15951535,33035,1732343993940/rs:state/1732343994415/Put/seqid=0 2024-11-23T06:39:55,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741841_1017 (size=5156) 2024-11-23T06:39:55,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741841_1017 (size=5156) 2024-11-23T06:39:55,453 INFO [M:0;df2f15951535:41847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f439918299f9422a9092b0af6098baee 2024-11-23T06:39:55,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:39:55,459 INFO [RS:0;df2f15951535:33035 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:39:55,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33035-0x101666b914e0001, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:39:55,459 INFO [RS:0;df2f15951535:33035 {}] regionserver.HRegionServer(1031): Exiting; stopping=df2f15951535,33035,1732343993940; zookeeper connection closed. 
2024-11-23T06:39:55,459 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a7cf558 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a7cf558 2024-11-23T06:39:55,459 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T06:39:55,479 DEBUG [M:0;df2f15951535:41847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/589ad9e8d0b34efbb0e33c3c36b7e4a3 is 52, key is load_balancer_on/state:d/1732343995079/Put/seqid=0 2024-11-23T06:39:55,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741842_1018 (size=5056) 2024-11-23T06:39:55,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741842_1018 (size=5056) 2024-11-23T06:39:55,485 INFO [M:0;df2f15951535:41847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/589ad9e8d0b34efbb0e33c3c36b7e4a3 2024-11-23T06:39:55,490 DEBUG [M:0;df2f15951535:41847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5e7690c16bac40bbaec45f22defdd7fc as hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5e7690c16bac40bbaec45f22defdd7fc 2024-11-23T06:39:55,495 INFO [M:0;df2f15951535:41847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5e7690c16bac40bbaec45f22defdd7fc, entries=8, sequenceid=29, filesize=5.5 K 2024-11-23T06:39:55,496 DEBUG [M:0;df2f15951535:41847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1309dd8fdfb54c85b56ab493017571e6 as hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1309dd8fdfb54c85b56ab493017571e6 2024-11-23T06:39:55,501 INFO [M:0;df2f15951535:41847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1309dd8fdfb54c85b56ab493017571e6, entries=3, sequenceid=29, filesize=5.2 K 2024-11-23T06:39:55,502 DEBUG [M:0;df2f15951535:41847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f439918299f9422a9092b0af6098baee as hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f439918299f9422a9092b0af6098baee 
2024-11-23T06:39:55,507 INFO [M:0;df2f15951535:41847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f439918299f9422a9092b0af6098baee, entries=1, sequenceid=29, filesize=5.0 K 2024-11-23T06:39:55,508 DEBUG [M:0;df2f15951535:41847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/589ad9e8d0b34efbb0e33c3c36b7e4a3 as hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/589ad9e8d0b34efbb0e33c3c36b7e4a3 2024-11-23T06:39:55,512 INFO [M:0;df2f15951535:41847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40021/user/jenkins/test-data/f7e858c9-5178-3044-71a7-9a3d5c67f3bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/589ad9e8d0b34efbb0e33c3c36b7e4a3, entries=1, sequenceid=29, filesize=4.9 K 2024-11-23T06:39:55,513 INFO [M:0;df2f15951535:41847 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=29, compaction requested=false 2024-11-23T06:39:55,514 INFO [M:0;df2f15951535:41847 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T06:39:55,515 DEBUG [M:0;df2f15951535:41847 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732343995386Disabling compacts and flushes for region at 1732343995386Disabling writes for close at 1732343995386Obtaining lock to block concurrent updates at 1732343995387 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732343995387Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732343995387Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732343995387Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732343995387Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732343995401 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732343995401Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732343995411 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732343995424 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732343995424Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732343995433 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732343995447 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732343995447Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732343995459 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732343995478 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732343995478Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49430534: reopening flushed file at 1732343995489 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6451eedf: reopening flushed file at 1732343995495 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ee1115e: reopening flushed file at 1732343995501 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e75764b: reopening flushed file at 1732343995507 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=29, compaction requested=false at 1732343995513 (+6 ms)Writing region close event to WAL at 1732343995514 (+1 ms)Closed at 1732343995514 2024-11-23T06:39:55,515 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,515 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,515 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,515 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,515 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T06:39:55,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41339 is added to blk_1073741830_1006 (size=10311) 2024-11-23T06:39:55,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36087 is added to blk_1073741830_1006 (size=10311) 2024-11-23T06:39:55,518 INFO [M:0;df2f15951535:41847 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T06:39:55,518 INFO [M:0;df2f15951535:41847 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41847 2024-11-23T06:39:55,518 INFO [M:0;df2f15951535:41847 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T06:39:55,518 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T06:39:55,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:39:55,627 INFO [M:0;df2f15951535:41847 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T06:39:55,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41847-0x101666b914e0000, quorum=127.0.0.1:55953, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T06:39:55,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1187e2f5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:39:55,631 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@219dec45{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:39:55,631 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:39:55,631 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@624ed4c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:39:55,631 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae1ce13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/hadoop.log.dir/,STOPPED} 2024-11-23T06:39:55,633 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:39:55,633 WARN [BP-1100694356-172.17.0.3-1732343990905 heartbeating to localhost/127.0.0.1:40021 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:39:55,633 WARN [BP-1100694356-172.17.0.3-1732343990905 heartbeating to localhost/127.0.0.1:40021 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1100694356-172.17.0.3-1732343990905 (Datanode Uuid b37b30cc-5a57-48e3-87c8-5d87780644dc) service to localhost/127.0.0.1:40021 2024-11-23T06:39:55,633 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:39:55,634 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/data/data3/current/BP-1100694356-172.17.0.3-1732343990905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:39:55,634 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/data/data4/current/BP-1100694356-172.17.0.3-1732343990905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:39:55,634 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:39:55,641 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7216654a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T06:39:55,641 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@534394c4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:39:55,641 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:39:55,641 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46761010{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:39:55,641 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e687b61{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/hadoop.log.dir/,STOPPED} 2024-11-23T06:39:55,642 WARN [BP-1100694356-172.17.0.3-1732343990905 heartbeating to localhost/127.0.0.1:40021 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T06:39:55,642 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T06:39:55,642 WARN [BP-1100694356-172.17.0.3-1732343990905 heartbeating to localhost/127.0.0.1:40021 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1100694356-172.17.0.3-1732343990905 (Datanode Uuid ca2f5264-7b7f-4f8c-abb9-17ed6b687235) service to localhost/127.0.0.1:40021 2024-11-23T06:39:55,642 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T06:39:55,642 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/data/data1/current/BP-1100694356-172.17.0.3-1732343990905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:39:55,643 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/cluster_169e5b38-49ae-8142-a8b7-27534bc0a59c/data/data2/current/BP-1100694356-172.17.0.3-1732343990905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T06:39:55,643 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T06:39:55,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@87b2e2b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T06:39:55,647 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e58a9be{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T06:39:55,647 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T06:39:55,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2191d18b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T06:39:55,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d7e0513{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/367d18eb-353a-66bd-7c03-489bfb138096/hadoop.log.dir/,STOPPED} 2024-11-23T06:39:55,653 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T06:39:55,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T06:39:55,676 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 230) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:40021 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40021 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40021 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40021 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40021 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40021
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:40021
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40021
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=532 (was 517) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=110 (was 94) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7419 (was 7451)
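Each "Potentially hanging thread" entry above pairs a thread name with the stack it was parked in when the post-test resource check ran, and the closing counters line compares file-descriptor, load, process, and memory readings against their pre-test values in the same before/after fashion. The following is a minimal Java sketch of that before/after idea, assuming nothing about HBase's actual ResourceChecker internals; the class name HangingThreadReport and the demo thread name are hypothetical.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class HangingThreadReport {

    // Snapshot the names of all currently live threads (the pre-test baseline).
    public static Set<String> snapshotThreadNames() {
        Set<String> names = new HashSet<>();
        for (Thread t : Thread.getAllStackTraces().keySet()) {
            names.add(t.getName());
        }
        return names;
    }

    // Print a "Potentially hanging thread" entry for every live thread not in the baseline.
    public static void reportNewThreads(Set<String> baseline) {
        for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
            Thread t = e.getKey();
            if (baseline.contains(t.getName()) || !t.isAlive()) {
                continue; // existed before the test, or already gone -- not a suspect
            }
            System.out.println("Potentially hanging thread: " + t.getName());
            for (StackTraceElement frame : e.getValue()) {
                System.out.println("    " + frame);
            }
            System.out.println();
        }
    }

    public static void main(String[] args) throws Exception {
        Set<String> before = snapshotThreadNames();
        // Stand-in for the test body: start a thread that never finishes (hypothetical name).
        Thread leaky = new Thread(() -> {
            while (true) {
                try { Thread.sleep(1000); } catch (InterruptedException ie) { return; }
            }
        }, "nioEventLoopGroup-demo-1");
        leaky.setDaemon(true);
        leaky.start();
        Thread.sleep(100); // give the thread a moment to start before diffing
        reportNewThreads(before);
    }
}

Run standalone, this prints one entry for the demo thread in the same "name plus stack frames" shape seen in the dump above; a real checker would run the diff after test teardown so that only threads the test failed to stop are reported.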